|
@@@ -2,3 -2,3 +2,4 @@@
|
|
|
2
2
|
/.github/* @Chia-Network/actions-reviewers
|
|
3
3
|
/PRETTY_GOOD_PRACTICES.md @altendky @Chia-Network/required-reviewers
|
|
4
4
|
/pylintrc @altendky @Chia-Network/required-reviewers
|
|
5
|
++
/tests/ether.py @altendky @Chia-Network/required-reviewers
|
|
@@@ -124,4 -124,4 +124,4 @@@ jobs
|
|
|
124
124
|
- name: Add benchmark results to workflow summary
|
|
125
125
|
if: always()
|
|
126
126
|
run: |
|
|
127
|
--
python -m tests.
|
|
127
|
++
python -m tests.process_junit --type benchmark --xml junit-data/benchmarks.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
|
|
@@@ -131,7 -131,7 +131,7 @@@ jobs
|
|
|
131
131
|
DEBIAN_FRONTEND: noninteractive
|
|
132
132
|
run: |
|
|
133
133
|
apt-get --yes update
|
|
134
|
--
apt-get install --yes git lsb-release sudo
|
|
134
|
++
apt-get install --yes git lsb-release sudo python3-venv
|
|
135
135
|
|
|
136
136
|
- name: Prepare Fedora
|
|
137
137
|
if: ${{ matrix.distribution.type == 'fedora' }}
|
|
@@@ -141,7 -141,7 +141,7 @@@
|
|
|
141
141
|
- name: Prepare Rocky
|
|
142
142
|
if: ${{ matrix.distribution.type == 'rocky' }}
|
|
143
143
|
run: |
|
|
144
|
--
yum install --assumeyes git sudo
|
|
144
|
++
yum install --assumeyes git sudo python39
|
|
145
145
|
|
|
146
146
|
- name: Prepare Ubuntu
|
|
147
147
|
if: ${{ matrix.distribution.type == 'ubuntu' }}
|
|
@@@ -153,7 -153,7 +153,7 @@@
|
|
|
153
153
|
apt-get install --yes software-properties-common
|
|
154
154
|
add-apt-repository --yes ppa:git-core/ppa
|
|
155
155
|
apt-get --yes update
|
|
156
|
--
apt-get install --yes git lsb-release sudo
|
|
156
|
++
apt-get install --yes git lsb-release sudo python3-venv
|
|
157
157
|
|
|
158
158
|
- name: Add safe git directory
|
|
159
159
|
run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
|
@@@ -142,19 -139,18 +142,11 @@@ jobs
|
|
|
142
142
|
path: junit-data/*
|
|
143
143
|
if-no-files-found: error
|
|
144
144
|
|
|
145
|
--
- name: Publish JUnit results
|
|
146
|
--
if: always()
|
|
147
|
-
uses: actions/upload-artifact@v4
|
|
148
|
-
uses: actions/upload-artifact@v3
|
|
149
|
--
with:
|
|
150
|
--
name: junit-results
|
|
151
|
--
path: junit-results/*
|
|
152
|
--
if-no-files-found: error
|
|
153
|
--
|
|
154
145
|
- name: Download Coverage
|
|
155
|
-
uses: actions/download-artifact@
|
|
146
|
+
uses: actions/download-artifact@v4
|
|
156
147
|
with:
|
|
157
|
-
|
|
148
|
+
merge-multiple: true
|
|
149
|
+
pattern: coverage-data-*
|
|
158
150
|
path: coverage-data
|
|
159
151
|
|
|
160
152
|
- name: Set up ${{ matrix.python.name }}
|
|
@@@ -169,6 -165,6 +161,20 @@@
|
|
|
169
161
|
|
|
170
162
|
- uses: chia-network/actions/activate-venv@main
|
|
171
163
|
|
|
164
|
++
- name: Add time out assert results to workflow summary
|
|
165
|
++
if: always()
|
|
166
|
++
run: |
|
|
167
|
++
python -m tests.process_junit --limit 50 --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> "$GITHUB_STEP_SUMMARY"
|
|
168
|
++
python -m tests.process_junit --type time_out_assert --xml junit-results/junit.xml --markdown --link-prefix ${{ github.event.repository.html_url }}/blob/${{ github.sha }}/ --link-line-separator \#L >> junit-results/time_out_assert.md
|
|
169
|
++
|
|
170
|
++
- name: Publish JUnit results
|
|
171
|
++
if: always()
|
|
172
|
++
uses: actions/upload-artifact@v4
|
|
173
|
++
with:
|
|
174
|
++
name: junit-results
|
|
175
|
++
path: junit-results/*
|
|
176
|
++
if-no-files-found: error
|
|
177
|
++
|
|
172
178
|
- name: Coverage Processing
|
|
173
179
|
run: |
|
|
174
180
|
coverage combine --rcfile=.coveragerc --data-file=coverage-reports/.coverage coverage-data/
|
|
@@@ -106,7 -101,7 +106,7 @@@ async def run_mempool_benchmark() -> No
|
|
|
106
106
|
|
|
107
107
|
# add 10 transactions to the mempool
|
|
108
108
|
for i in range(10):
|
|
109
|
--
coin = Coin(make_hash(height * 10 + i), IDENTITY_PUZZLE_HASH, height * 100000 + i * 100)
|
|
109
|
++
coin = Coin(make_hash(height * 10 + i), IDENTITY_PUZZLE_HASH, uint64(height * 100000 + i * 100))
|
|
110
110
|
sb = make_spend_bundle(coin, height)
|
|
111
111
|
# make this coin available via get_coin_record, which is called
|
|
112
112
|
# by mempool_manager
|
|
@@@ -89,6 -87,6 +89,11 @@@ if [ "$PLATFORM" = "arm64" ]; the
|
|
|
89
89
|
# @TODO Maybe versions of sub dependencies should be managed by gem lock file.
|
|
90
90
|
# @TODO Once ruby 2.6 can be installed on `apt install ruby`, installing public_suffix below should be removed.
|
|
91
91
|
sudo gem install public_suffix -v 4.0.7
|
|
92
|
++
# ERROR: Error installing fpm:
|
|
93
|
++
# The last version of dotenv (>= 0) to support your Ruby & RubyGems was 2.8.1. Try installing it with `gem install dotenv -v 2.8.1` and then running the current command again
|
|
94
|
++
# dotenv requires Ruby version >= 3.0. The current ruby version is 2.7.0.0.
|
|
95
|
++
# @TODO Once ruby 3.0 can be installed on `apt install ruby`, installing dotenv below should be removed.
|
|
96
|
++
sudo gem install dotenv -v 2.8.1
|
|
92
97
|
sudo gem install fpm
|
|
93
98
|
echo USE_SYSTEM_FPM=true npx electron-builder build --linux deb --arm64 \
|
|
94
99
|
--config.extraMetadata.name=chia-blockchain \
|
|
@@@ -2823,9 -2823,9 +2823,9 @@@
|
|
|
2823
2823
|
}
|
|
2824
2824
|
},
|
|
2825
2825
|
"node_modules/follow-redirects": {
|
|
2826
|
--
"version": "1.15.
|
|
2827
|
--
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.
|
|
2828
|
--
"integrity": "sha512-
|
|
2826
|
++
"version": "1.15.4",
|
|
2827
|
++
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz",
|
|
2828
|
++
"integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw==",
|
|
2829
2829
|
"funding": [
|
|
2830
2830
|
{
|
|
2831
2831
|
"type": "individual",
|
|
@@@ -9471,9 -9471,9 +9471,9 @@@
|
|
|
9471
9471
|
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ=="
|
|
9472
9472
|
},
|
|
9473
9473
|
"follow-redirects": {
|
|
9474
|
--
"version": "1.15.
|
|
9475
|
--
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.
|
|
9476
|
--
"integrity": "sha512-
|
|
9474
|
++
"version": "1.15.4",
|
|
9475
|
++
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz",
|
|
9476
|
++
"integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw=="
|
|
9477
9477
|
},
|
|
9478
9478
|
"foreground-child": {
|
|
9479
9479
|
"version": "3.1.1",
|
|
@@@ -2838,9 -2838,9 +2838,9 @@@
|
|
|
2838
2838
|
}
|
|
2839
2839
|
},
|
|
2840
2840
|
"node_modules/follow-redirects": {
|
|
2841
|
--
"version": "1.15.
|
|
2842
|
--
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.
|
|
2843
|
--
"integrity": "sha512-
|
|
2841
|
++
"version": "1.15.4",
|
|
2842
|
++
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz",
|
|
2843
|
++
"integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw==",
|
|
2844
2844
|
"funding": [
|
|
2845
2845
|
{
|
|
2846
2846
|
"type": "individual",
|
|
@@@ -10080,9 -10080,9 +10080,9 @@@
|
|
|
10080
10080
|
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ=="
|
|
10081
10081
|
},
|
|
10082
10082
|
"follow-redirects": {
|
|
10083
|
--
"version": "1.15.
|
|
10084
|
--
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.
|
|
10085
|
--
"integrity": "sha512-
|
|
10083
|
++
"version": "1.15.4",
|
|
10084
|
++
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz",
|
|
10085
|
++
"integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw=="
|
|
10086
10086
|
},
|
|
10087
10087
|
"foreground-child": {
|
|
10088
10088
|
"version": "3.1.1",
|
|
@@@ -2730,9 -2730,9 +2730,9 @@@
|
|
|
2730
2730
|
}
|
|
2731
2731
|
},
|
|
2732
2732
|
"node_modules/follow-redirects": {
|
|
2733
|
--
"version": "1.15.
|
|
2734
|
--
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.
|
|
2735
|
--
"integrity": "sha512-
|
|
2733
|
++
"version": "1.15.4",
|
|
2734
|
++
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz",
|
|
2735
|
++
"integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw==",
|
|
2736
2736
|
"funding": [
|
|
2737
2737
|
{
|
|
2738
2738
|
"type": "individual",
|
|
@@@ -9291,9 -9291,9 +9291,9 @@@
|
|
|
9291
9291
|
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ=="
|
|
9292
9292
|
},
|
|
9293
9293
|
"follow-redirects": {
|
|
9294
|
--
"version": "1.15.
|
|
9295
|
--
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.
|
|
9296
|
--
"integrity": "sha512-
|
|
9294
|
++
"version": "1.15.4",
|
|
9295
|
++
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.4.tgz",
|
|
9296
|
++
"integrity": "sha512-Cr4D/5wlrb0z9dgERpUL3LrmPKVDsETIJhaCMeDfuFYcqa5bldGV6wBsAN6X/vxlXQtFBMrXdXxdL8CbDTGniw=="
|
|
9297
9297
|
},
|
|
9298
9298
|
"foreground-child": {
|
|
9299
9299
|
"version": "3.1.1",
|
|
@@@ -17,6 -14,6 +17,7 @@@ from chia.consensus.cost_calculator imp
|
|
|
17
17
|
from chia.consensus.default_constants import DEFAULT_CONSTANTS
|
|
18
18
|
from chia.full_node.bundle_tools import simple_solution_generator
|
|
19
19
|
from chia.full_node.coin_store import CoinStore
|
|
20
|
++
from chia.full_node.hint_store import HintStore
|
|
20
21
|
from chia.full_node.mempool import Mempool
|
|
21
22
|
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions, get_puzzle_and_solution_for_coin
|
|
22
23
|
from chia.full_node.mempool_manager import MempoolManager
|
|
@@@ -34,6 -31,6 +35,7 @@@ from chia.util.errors import Err, Valid
|
|
|
34
35
|
from chia.util.hash import std_hash
|
|
35
36
|
from chia.util.ints import uint32, uint64
|
|
36
37
|
from chia.util.streamable import Streamable, streamable
|
|
38
|
++
from chia.wallet.util.compute_hints import HintedCoin, compute_spend_hints_and_additions
|
|
37
39
|
|
|
38
40
|
"""
|
|
39
41
|
The purpose of this file is to provide a lightweight simulator for the testing of Chialisp smart contracts.
|
|
@@@ -143,57 -141,56 +145,59 @@@ class SpendSim
|
|
|
143
145
|
timestamp: uint64
|
|
144
146
|
block_height: uint32
|
|
145
147
|
defaults: ConsensusConstants
|
|
148
|
++
hint_store: HintStore
|
|
146
149
|
|
|
147
150
|
@classmethod
|
|
148
|
-
|
|
151
|
+
@contextlib.asynccontextmanager
|
|
152
|
+
async def managed(
|
|
149
153
|
cls: Type[_T_SpendSim], db_path: Optional[Path] = None, defaults: ConsensusConstants = DEFAULT_CONSTANTS
|
|
150
|
-
) -> _T_SpendSim:
|
|
154
|
+
) -> AsyncIterator[_T_SpendSim]:
|
|
151
155
|
self = cls()
|
|
152
156
|
if db_path is None:
|
|
153
157
|
uri = f"file:db_{random.randint(0, 99999999)}?mode=memory&cache=shared"
|
|
154
158
|
else:
|
|
155
159
|
uri = f"file:{db_path}"
|
|
156
160
|
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
await conn.execute("CREATE TABLE IF NOT EXISTS block_data(data blob PRIMARY_KEY)")
|
|
166
|
-
cursor = await conn.execute("SELECT * from block_data")
|
|
167
|
-
row = await cursor.fetchone()
|
|
168
|
-
await cursor.close()
|
|
169
|
-
if row is not None:
|
|
170
|
-
store_data = SimStore.from_bytes(row[0])
|
|
171
|
-
self.timestamp = store_data.timestamp
|
|
172
|
-
self.block_height = store_data.block_height
|
|
173
|
-
self.block_records = store_data.block_records
|
|
174
|
-
self.blocks = store_data.blocks
|
|
175
|
-
self.mempool_manager.peak = self.block_records[-1]
|
|
176
|
-
else:
|
|
177
|
-
self.timestamp = uint64(1)
|
|
178
|
-
self.block_height = uint32(0)
|
|
179
|
-
self.block_records = []
|
|
180
|
-
self.blocks = []
|
|
181
|
-
return self
|
|
182
|
-
|
|
183
|
-
async def close(self) -> None:
|
|
184
|
-
async with self.db_wrapper.writer_maybe_transaction() as conn:
|
|
185
|
-
c = await conn.execute("DELETE FROM block_data")
|
|
186
|
-
await c.close()
|
|
187
|
-
c = await conn.execute(
|
|
188
|
-
"INSERT INTO block_data VALUES(?)",
|
|
189
|
-
(bytes(SimStore(self.timestamp, self.block_height, self.block_records, self.blocks)),),
|
|
190
|
-
)
|
|
191
|
-
await c.close()
|
|
192
|
-
await self.db_wrapper.close()
|
|
193
|
-
|
|
194
|
-
async def new_peak(self) -> None:
|
|
195
|
-
await self.mempool_manager.new_peak(self.block_records[-1], None)
|
|
161
|
+
async with DBWrapper2.managed(database=uri, uri=True, reader_count=1, db_version=2) as self.db_wrapper:
|
|
162
|
+
self.coin_store = await CoinStore.create(self.db_wrapper)
|
|
163
|
++
self.hint_store = await HintStore.create(self.db_wrapper)
|
|
164
|
+
self.mempool_manager = MempoolManager(self.coin_store.get_coin_records, defaults)
|
|
165
|
+
self.defaults = defaults
|
|
166
|
+
|
|
167
|
+
# Load the next data if there is any
|
|
168
|
+
async with self.db_wrapper.writer_maybe_transaction() as conn:
|
|
196
|
-
await conn.execute("CREATE TABLE IF NOT EXISTS block_data(data blob
|
|
169
|
++
await conn.execute("CREATE TABLE IF NOT EXISTS block_data(data blob PRIMARY KEY)")
|
|
170
|
+
cursor = await conn.execute("SELECT * from block_data")
|
|
171
|
+
row = await cursor.fetchone()
|
|
172
|
+
await cursor.close()
|
|
173
|
+
if row is not None:
|
|
174
|
+
store_data = SimStore.from_bytes(row[0])
|
|
175
|
+
self.timestamp = store_data.timestamp
|
|
176
|
+
self.block_height = store_data.block_height
|
|
177
|
+
self.block_records = store_data.block_records
|
|
178
|
+
self.blocks = store_data.blocks
|
|
179
|
+
self.mempool_manager.peak = self.block_records[-1]
|
|
180
|
+
else:
|
|
181
|
+
self.timestamp = uint64(1)
|
|
182
|
+
self.block_height = uint32(0)
|
|
183
|
+
self.block_records = []
|
|
184
|
+
self.blocks = []
|
|
185
|
+
|
|
186
|
+
try:
|
|
187
|
+
yield self
|
|
188
|
+
finally:
|
|
189
|
+
with anyio.CancelScope(shield=True):
|
|
190
|
+
async with self.db_wrapper.writer_maybe_transaction() as conn:
|
|
191
|
+
c = await conn.execute("DELETE FROM block_data")
|
|
192
|
+
await c.close()
|
|
193
|
+
c = await conn.execute(
|
|
194
|
+
"INSERT INTO block_data VALUES(?)",
|
|
195
|
+
(bytes(SimStore(self.timestamp, self.block_height, self.block_records, self.blocks)),),
|
|
196
|
+
)
|
|
197
|
+
await c.close()
|
|
198
|
+
|
|
199
|
+
async def new_peak(self, spent_coins_ids: Optional[List[bytes32]]) -> None:
|
|
200
|
+
await self.mempool_manager.new_peak(self.block_records[-1], spent_coins_ids)
|
|
197
201
|
|
|
198
202
|
def new_coin_record(self, coin: Coin, coinbase: bool = False) -> CoinRecord:
|
|
199
203
|
return CoinRecord(
|
|
@@@ -268,12 -260,11 +272,19 @@@
|
|
|
268
272
|
if result is not None:
|
|
269
273
|
bundle, additions = result
|
|
270
274
|
generator_bundle = bundle
|
|
275
|
++
for spend in generator_bundle.coin_spends:
|
|
276
|
++
hint_dict, _ = compute_spend_hints_and_additions(spend)
|
|
277
|
++
hints: List[Tuple[bytes32, bytes]] = []
|
|
278
|
++
hint_obj: HintedCoin
|
|
279
|
++
for coin_name, hint_obj in hint_dict.items():
|
|
280
|
++
if hint_obj.hint is not None:
|
|
281
|
++
hints.append((coin_name, bytes(hint_obj.hint)))
|
|
282
|
++
await self.hint_store.add_hints(hints)
|
|
271
283
|
return_additions = additions
|
|
272
284
|
return_removals = bundle.removals()
|
|
273
|
-
|
|
285
|
+
spent_coins_ids = [r.name() for r in return_removals]
|
|
274
|
-
|
|
275
286
|
await self.coin_store._add_coin_records([self.new_coin_record(addition) for addition in additions])
|
|
276
|
-
await self.coin_store._set_spent(
|
|
287
|
+
await self.coin_store._set_spent(spent_coins_ids, uint32(self.block_height + 1))
|
|
277
288
|
|
|
278
289
|
# SimBlockRecord is created
|
|
279
290
|
generator: Optional[BlockGenerator] = await self.generate_transaction_generator(generator_bundle)
|
|
@@@ -449,3 -440,3 +460,31 @@@ class SimClient
|
|
|
449
460
|
return None
|
|
450
461
|
else:
|
|
451
462
|
return item.__dict__
|
|
463
|
++
|
|
464
|
++
async def get_coin_records_by_hint(
|
|
465
|
++
self,
|
|
466
|
++
hint: bytes32,
|
|
467
|
++
include_spent_coins: bool = True,
|
|
468
|
++
start_height: Optional[int] = None,
|
|
469
|
++
end_height: Optional[int] = None,
|
|
470
|
++
) -> List[CoinRecord]:
|
|
471
|
++
"""
|
|
472
|
++
Retrieves coins by hint, by default returns unspent coins.
|
|
473
|
++
"""
|
|
474
|
++
names: List[bytes32] = await self.service.hint_store.get_coin_ids(hint)
|
|
475
|
++
|
|
476
|
++
kwargs: Dict[str, Any] = {
|
|
477
|
++
"include_spent_coins": False,
|
|
478
|
++
"names": names,
|
|
479
|
++
}
|
|
480
|
++
if start_height:
|
|
481
|
++
kwargs["start_height"] = uint32(start_height)
|
|
482
|
++
if end_height:
|
|
483
|
++
kwargs["end_height"] = uint32(end_height)
|
|
484
|
++
|
|
485
|
++
if include_spent_coins:
|
|
486
|
++
kwargs["include_spent_coins"] = include_spent_coins
|
|
487
|
++
|
|
488
|
++
coin_records = await self.service.coin_store.get_coin_records_by_names(**kwargs)
|
|
489
|
++
|
|
490
|
++
return coin_records
|
|
@@@ -32,6 -32,6 +32,7 @@@ from chia.wallet.transaction_record imp
|
|
|
32
32
|
from chia.wallet.util.tx_config import CoinSelectionConfig, CoinSelectionConfigLoader, TXConfig, TXConfigLoader
|
|
33
33
|
|
|
34
34
|
NODE_TYPES: Dict[str, Type[RpcClient]] = {
|
|
35
|
++
"base": RpcClient,
|
|
35
36
|
"farmer": FarmerRpcClient,
|
|
36
37
|
"wallet": WalletRpcClient,
|
|
37
38
|
"full_node": FullNodeRpcClient,
|
|
@@@ -41,6 -41,6 +42,7 @@@
|
|
|
41
42
|
}
|
|
42
43
|
|
|
43
44
|
node_config_section_names: Dict[Type[RpcClient], str] = {
|
|
45
|
++
RpcClient: "base",
|
|
44
46
|
FarmerRpcClient: "farmer",
|
|
45
47
|
WalletRpcClient: "wallet",
|
|
46
48
|
FullNodeRpcClient: "full_node",
|
|
@@@ -92,6 -92,6 +94,7 @@@ async def get_any_service_client
|
|
|
92
94
|
rpc_port: Optional[int] = None,
|
|
93
95
|
root_path: Optional[Path] = None,
|
|
94
96
|
consume_errors: bool = True,
|
|
97
|
++
use_ssl: bool = True,
|
|
95
98
|
) -> AsyncIterator[Tuple[_T_RpcClient, Dict[str, Any]]]:
|
|
96
99
|
"""
|
|
97
100
|
Yields a tuple with a RpcClient for the applicable node type a dictionary of the node's configuration,
|
|
@@@ -112,7 -112,7 +115,11 @@@
|
|
|
112
115
|
if rpc_port is None:
|
|
113
116
|
rpc_port = config[node_type]["rpc_port"]
|
|
114
117
|
# select node client type based on string
|
|
115
|
--
|
|
118
|
++
if use_ssl:
|
|
119
|
++
node_client = await client_type.create(self_hostname, uint16(rpc_port), root_path=root_path, net_config=config)
|
|
120
|
++
else:
|
|
121
|
++
node_client = await client_type.create(self_hostname, uint16(rpc_port), root_path=None, net_config=None)
|
|
122
|
++
|
|
116
123
|
try:
|
|
117
124
|
# check if we can connect to node
|
|
118
125
|
await validate_client_connection(node_client, node_type, rpc_port, consume_errors)
|
|
@@@ -51,14 -51,14 +51,15 @@@ def create_changelist_option() -> Calla
|
|
|
51
51
|
)
|
|
52
52
|
|
|
53
53
|
|
|
54
|
--
def create_key_option() -> Callable[[FC], FC]:
|
|
54
|
++
def create_key_option(multiple: bool = False) -> Callable[[FC], FC]:
|
|
55
55
|
return click.option(
|
|
56
56
|
"-k",
|
|
57
57
|
"--key",
|
|
58
|
--
"key_string",
|
|
58
|
++
"key_strings" if multiple else "key_string",
|
|
59
59
|
help="str representing the key",
|
|
60
60
|
type=str,
|
|
61
61
|
required=True,
|
|
62
|
++
multiple=multiple,
|
|
62
63
|
)
|
|
63
64
|
|
|
64
65
|
|
|
@@@ -106,6 -106,6 +107,25 @@@ def create_fee_option() -> Callable[[FC
|
|
|
106
107
|
)
|
|
107
108
|
|
|
108
109
|
|
|
110
|
++
def create_page_option() -> Callable[[FC], FC]:
|
|
111
|
++
return click.option(
|
|
112
|
++
"-p",
|
|
113
|
++
"--page",
|
|
114
|
++
help="Enables pagination of the output and requests a specific page.",
|
|
115
|
++
type=int,
|
|
116
|
++
required=False,
|
|
117
|
++
)
|
|
118
|
++
|
|
119
|
++
|
|
120
|
++
def create_max_page_size_option() -> Callable[[FC], FC]:
|
|
121
|
++
return click.option(
|
|
122
|
++
"--max-page-size",
|
|
123
|
++
help="Set how many bytes to be included in a page, if pagination is enabled.",
|
|
124
|
++
type=int,
|
|
125
|
++
required=False,
|
|
126
|
++
)
|
|
127
|
++
|
|
128
|
++
|
|
109
129
|
@data_cmd.command("create_data_store", help="Create a new data store")
|
|
110
130
|
@create_rpc_port_option()
|
|
111
131
|
@create_fee_option()
|
|
@@@ -171,15 -171,15 +191,19 @@@ def update_data_store
|
|
|
171
191
|
@click.option("-r", "--root_hash", help="The hexadecimal root hash", type=str, required=False)
|
|
172
192
|
@create_rpc_port_option()
|
|
173
193
|
@options.create_fingerprint()
|
|
194
|
++
@create_page_option()
|
|
195
|
++
@create_max_page_size_option()
|
|
174
196
|
def get_keys(
|
|
175
197
|
id: str,
|
|
176
198
|
root_hash: Optional[str],
|
|
177
199
|
data_rpc_port: int,
|
|
178
200
|
fingerprint: Optional[int],
|
|
201
|
++
page: Optional[int],
|
|
202
|
++
max_page_size: Optional[int],
|
|
179
203
|
) -> None:
|
|
180
204
|
from chia.cmds.data_funcs import get_keys_cmd
|
|
181
205
|
|
|
182
|
--
run(get_keys_cmd(data_rpc_port, id, root_hash, fingerprint=fingerprint))
|
|
206
|
++
run(get_keys_cmd(data_rpc_port, id, root_hash, fingerprint=fingerprint, page=page, max_page_size=max_page_size))
|
|
183
207
|
|
|
184
208
|
|
|
185
209
|
@data_cmd.command("get_keys_values", help="Get all keys and values for a given store")
|
|
@@@ -187,15 -187,15 +211,23 @@@
|
|
|
187
211
|
@click.option("-r", "--root_hash", help="The hexadecimal root hash", type=str, required=False)
|
|
188
212
|
@create_rpc_port_option()
|
|
189
213
|
@options.create_fingerprint()
|
|
214
|
++
@create_page_option()
|
|
215
|
++
@create_max_page_size_option()
|
|
190
216
|
def get_keys_values(
|
|
191
217
|
id: str,
|
|
192
218
|
root_hash: Optional[str],
|
|
193
219
|
data_rpc_port: int,
|
|
194
220
|
fingerprint: Optional[int],
|
|
221
|
++
page: Optional[int],
|
|
222
|
++
max_page_size: Optional[int],
|
|
195
223
|
) -> None:
|
|
196
224
|
from chia.cmds.data_funcs import get_keys_values_cmd
|
|
197
225
|
|
|
198
|
--
run(
|
|
226
|
++
run(
|
|
227
|
++
get_keys_values_cmd(
|
|
228
|
++
data_rpc_port, id, root_hash, fingerprint=fingerprint, page=page, max_page_size=max_page_size
|
|
229
|
++
)
|
|
230
|
++
)
|
|
199
231
|
|
|
200
232
|
|
|
201
233
|
@data_cmd.command("get_root", help="Get the published root hash value for a given store")
|
|
@@@ -275,16 -275,16 +307,30 @@@ def unsubscribe
|
|
|
275
307
|
@click.option("-hash_2", "--hash_2", help="Final hash", type=str)
|
|
276
308
|
@create_rpc_port_option()
|
|
277
309
|
@options.create_fingerprint()
|
|
310
|
++
@create_page_option()
|
|
311
|
++
@create_max_page_size_option()
|
|
278
312
|
def get_kv_diff(
|
|
279
313
|
id: str,
|
|
280
314
|
hash_1: str,
|
|
281
315
|
hash_2: str,
|
|
282
316
|
data_rpc_port: int,
|
|
283
317
|
fingerprint: Optional[int],
|
|
318
|
++
page: Optional[int],
|
|
319
|
++
max_page_size: Optional[int],
|
|
284
320
|
) -> None:
|
|
285
321
|
from chia.cmds.data_funcs import get_kv_diff_cmd
|
|
286
322
|
|
|
287
|
--
run(
|
|
323
|
++
run(
|
|
324
|
++
get_kv_diff_cmd(
|
|
325
|
++
rpc_port=data_rpc_port,
|
|
326
|
++
store_id=id,
|
|
327
|
++
hash_1=hash_1,
|
|
328
|
++
hash_2=hash_2,
|
|
329
|
++
fingerprint=fingerprint,
|
|
330
|
++
page=page,
|
|
331
|
++
max_page_size=max_page_size,
|
|
332
|
++
)
|
|
333
|
++
)
|
|
288
334
|
|
|
289
335
|
|
|
290
336
|
@data_cmd.command("get_root_history", help="Get all changes of a singleton")
|
|
@@@ -530,3 -529,3 +576,48 @@@ def wallet_log_in
|
|
|
530
576
|
fingerprint=fingerprint,
|
|
531
577
|
)
|
|
532
578
|
)
|
|
579
|
++
|
|
580
|
++
|
|
581
|
++
@data_cmd.command(
|
|
582
|
++
"get_proof",
|
|
583
|
++
help="Obtains a merkle proof of inclusion for a given key",
|
|
584
|
++
)
|
|
585
|
++
@create_data_store_id_option()
|
|
586
|
++
@create_rpc_port_option()
|
|
587
|
++
@create_key_option(multiple=True)
|
|
588
|
++
@options.create_fingerprint()
|
|
589
|
++
def get_proof(
|
|
590
|
++
id: str,
|
|
591
|
++
key_strings: List[str],
|
|
592
|
++
data_rpc_port: int,
|
|
593
|
++
fingerprint: Optional[int],
|
|
594
|
++
) -> None:
|
|
595
|
++
from chia.cmds.data_funcs import get_proof_cmd
|
|
596
|
++
|
|
597
|
++
store_id = bytes32.from_hexstr(id)
|
|
598
|
++
|
|
599
|
++
run(get_proof_cmd(rpc_port=data_rpc_port, store_id=store_id, fingerprint=fingerprint, key_strings=key_strings))
|
|
600
|
++
|
|
601
|
++
|
|
602
|
++
@data_cmd.command(
|
|
603
|
++
"verify_proof",
|
|
604
|
++
help="Verifies a merkle proof of inclusion",
|
|
605
|
++
)
|
|
606
|
++
@click.option(
|
|
607
|
++
"-p",
|
|
608
|
++
"--proof",
|
|
609
|
++
"proof_string",
|
|
610
|
++
help="Proof to validate in JSON format.",
|
|
611
|
++
type=str,
|
|
612
|
++
)
|
|
613
|
++
@create_rpc_port_option()
|
|
614
|
++
@options.create_fingerprint()
|
|
615
|
++
def verify_proof(
|
|
616
|
++
proof_string: str,
|
|
617
|
++
data_rpc_port: int,
|
|
618
|
++
fingerprint: Optional[int],
|
|
619
|
++
) -> None:
|
|
620
|
++
from chia.cmds.data_funcs import verify_proof_cmd
|
|
621
|
++
|
|
622
|
++
proof_dict = json.loads(proof_string)
|
|
623
|
++
run(verify_proof_cmd(rpc_port=data_rpc_port, fingerprint=fingerprint, proof=proof_dict))
|
|
@@@ -83,26 -83,26 +83,42 @@@ async def get_keys_cmd
|
|
|
83
83
|
store_id: str,
|
|
84
84
|
root_hash: Optional[str],
|
|
85
85
|
fingerprint: Optional[int],
|
|
86
|
--
|
|
86
|
++
page: Optional[int],
|
|
87
|
++
max_page_size: Optional[int],
|
|
88
|
++
root_path: Optional[Path] = None,
|
|
89
|
++
) -> Dict[str, Any]:
|
|
87
90
|
store_id_bytes = bytes32.from_hexstr(store_id)
|
|
88
91
|
root_hash_bytes = None if root_hash is None else bytes32.from_hexstr(root_hash)
|
|
89
|
--
|
|
90
|
--
|
|
92
|
++
res = dict()
|
|
93
|
++
async with get_client(rpc_port=rpc_port, fingerprint=fingerprint, root_path=root_path) as (client, _):
|
|
94
|
++
res = await client.get_keys(
|
|
95
|
++
store_id=store_id_bytes, root_hash=root_hash_bytes, page=page, max_page_size=max_page_size
|
|
96
|
++
)
|
|
91
97
|
print(json.dumps(res, indent=4, sort_keys=True))
|
|
92
98
|
|
|
99
|
++
return res
|
|
100
|
++
|
|
93
101
|
|
|
94
102
|
async def get_keys_values_cmd(
|
|
95
103
|
rpc_port: Optional[int],
|
|
96
104
|
store_id: str,
|
|
97
105
|
root_hash: Optional[str],
|
|
98
106
|
fingerprint: Optional[int],
|
|
99
|
--
|
|
107
|
++
page: Optional[int],
|
|
108
|
++
max_page_size: Optional[int],
|
|
109
|
++
root_path: Optional[Path] = None,
|
|
110
|
++
) -> Dict[str, Any]:
|
|
100
111
|
store_id_bytes = bytes32.from_hexstr(store_id)
|
|
101
112
|
root_hash_bytes = None if root_hash is None else bytes32.from_hexstr(root_hash)
|
|
102
|
--
|
|
103
|
--
|
|
113
|
++
res = dict()
|
|
114
|
++
async with get_client(rpc_port=rpc_port, fingerprint=fingerprint, root_path=root_path) as (client, _):
|
|
115
|
++
res = await client.get_keys_values(
|
|
116
|
++
store_id=store_id_bytes, root_hash=root_hash_bytes, page=page, max_page_size=max_page_size
|
|
117
|
++
)
|
|
104
118
|
print(json.dumps(res, indent=4, sort_keys=True))
|
|
105
119
|
|
|
120
|
++
return res
|
|
121
|
++
|
|
106
122
|
|
|
107
123
|
async def get_root_cmd(
|
|
108
124
|
rpc_port: Optional[int],
|
|
@@@ -157,14 -157,14 +173,23 @@@ async def get_kv_diff_cmd
|
|
|
157
173
|
hash_1: str,
|
|
158
174
|
hash_2: str,
|
|
159
175
|
fingerprint: Optional[int],
|
|
160
|
--
|
|
176
|
++
page: Optional[int],
|
|
177
|
++
max_page_size: Optional[int],
|
|
178
|
++
root_path: Optional[Path] = None,
|
|
179
|
++
) -> Dict[str, Any]:
|
|
161
180
|
store_id_bytes = bytes32.from_hexstr(store_id)
|
|
162
181
|
hash_1_bytes = bytes32.from_hexstr(hash_1)
|
|
163
182
|
hash_2_bytes = bytes32.from_hexstr(hash_2)
|
|
164
|
--
|
|
165
|
--
|
|
183
|
++
res = dict()
|
|
184
|
++
|
|
185
|
++
async with get_client(rpc_port=rpc_port, fingerprint=fingerprint, root_path=root_path) as (client, _):
|
|
186
|
++
res = await client.get_kv_diff(
|
|
187
|
++
store_id=store_id_bytes, hash_1=hash_1_bytes, hash_2=hash_2_bytes, page=page, max_page_size=max_page_size
|
|
188
|
++
)
|
|
166
189
|
print(json.dumps(res, indent=4, sort_keys=True))
|
|
167
190
|
|
|
191
|
++
return res
|
|
192
|
++
|
|
168
193
|
|
|
169
194
|
async def get_root_history_cmd(
|
|
170
195
|
rpc_port: Optional[int],
|
|
@@@ -286,3 -286,3 +311,32 @@@ async def clear_pending_roots
|
|
|
286
311
|
print(json.dumps(result, indent=4, sort_keys=True))
|
|
287
312
|
|
|
288
313
|
return result
|
|
314
|
++
|
|
315
|
++
|
|
316
|
++
async def get_proof_cmd(
|
|
317
|
++
store_id: bytes32,
|
|
318
|
++
key_strings: List[str],
|
|
319
|
++
rpc_port: Optional[int],
|
|
320
|
++
root_path: Optional[Path] = None,
|
|
321
|
++
fingerprint: Optional[int] = None,
|
|
322
|
++
) -> Dict[str, Any]:
|
|
323
|
++
result = dict()
|
|
324
|
++
async with get_client(rpc_port=rpc_port, fingerprint=fingerprint, root_path=root_path) as (client, _):
|
|
325
|
++
result = await client.get_proof(store_id=store_id, keys=[hexstr_to_bytes(key) for key in key_strings])
|
|
326
|
++
print(json.dumps(result, indent=4, sort_keys=True))
|
|
327
|
++
|
|
328
|
++
return result
|
|
329
|
++
|
|
330
|
++
|
|
331
|
++
async def verify_proof_cmd(
|
|
332
|
++
proof: Dict[str, Any],
|
|
333
|
++
rpc_port: Optional[int],
|
|
334
|
++
root_path: Optional[Path] = None,
|
|
335
|
++
fingerprint: Optional[int] = None,
|
|
336
|
++
) -> Dict[str, Any]:
|
|
337
|
++
result = dict()
|
|
338
|
++
async with get_client(rpc_port=rpc_port, fingerprint=fingerprint, root_path=root_path) as (client, _):
|
|
339
|
++
result = await client.verify_proof(proof=proof)
|
|
340
|
++
print(json.dumps(result, indent=4, sort_keys=True))
|
|
341
|
++
|
|
342
|
++
return result
|
|
@@@ -1,6 -1,6 +1,7 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
import asyncio
|
|
4
|
++
import pathlib
|
|
4
5
|
from decimal import Decimal
|
|
5
6
|
from typing import List, Optional, Sequence
|
|
6
7
|
|
|
@@@ -441,9 -441,10 +442,15 @@@ def add_token_cmd(wallet_rpc_port: Opti
|
|
|
441
442
|
"-r",
|
|
442
443
|
"--request",
|
|
443
444
|
help="A wallet id of an asset to receive and the amount you wish to receive (formatted like wallet_id:amount)",
|
|
444
|
-
required=True,
|
|
445
445
|
multiple=True,
|
|
446
446
|
)
|
|
447
|
--
@click.option(
|
|
447
|
++
@click.option(
|
|
448
|
++
"-p",
|
|
449
|
++
"--filepath",
|
|
450
|
++
help="The path to write the generated offer file to",
|
|
451
|
++
required=True,
|
|
452
|
++
type=click.Path(dir_okay=False, writable=True, path_type=pathlib.Path),
|
|
453
|
++
)
|
|
448
454
|
@click.option(
|
|
449
455
|
"-m", "--fee", help="A fee to add to the offer when it gets taken, in XCH", default="0", show_default=True
|
|
450
456
|
)
|
|
@@@ -459,10 -459,9 +466,10 @@@ def make_offer_cmd
|
|
|
459
466
|
fingerprint: int,
|
|
460
467
|
offer: Sequence[str],
|
|
461
468
|
request: Sequence[str],
|
|
462
|
--
filepath:
|
|
469
|
++
filepath: pathlib.Path,
|
|
463
470
|
fee: str,
|
|
464
471
|
reuse: bool,
|
|
472
|
+
override: bool,
|
|
465
473
|
) -> None:
|
|
466
474
|
from .wallet_funcs import make_offer
|
|
467
475
|
|
|
@@@ -413,7 -412,7 +413,7 @@@ async def make_offer
|
|
|
413
413
|
d_fee: Decimal,
|
|
414
414
|
offers: Sequence[str],
|
|
415
415
|
requests: Sequence[str],
|
|
416
|
--
filepath:
|
|
416
|
++
filepath: pathlib.Path,
|
|
417
417
|
reuse_puzhash: Optional[bool],
|
|
418
418
|
) -> None:
|
|
419
419
|
async with get_wallet_client(wallet_rpc_port, fp) as (wallet_client, fingerprint, config):
|
|
@@@ -550,23 -549,23 +550,24 @@@
|
|
|
550
550
|
|
|
551
551
|
cli_confirm("Confirm (y/n): ", "Not creating offer...")
|
|
552
552
|
|
|
553
|
--
|
|
554
|
--
|
|
555
|
--
|
|
556
|
--
|
|
557
|
--
|
|
558
|
--
|
|
559
|
--
|
|
560
|
--
|
|
561
|
--
if offer is not None:
|
|
562
|
--
with open(pathlib.Path(filepath), "w") as file:
|
|
563
|
--
file.write(offer.to_bech32())
|
|
564
|
--
print(f"Created offer with ID {trade_record.trade_id}")
|
|
565
|
--
print(
|
|
566
|
--
f"Use chia wallet get_offers --id " f"{trade_record.trade_id} -f {fingerprint} to view status"
|
|
553
|
++
with filepath.open(mode="w") as file:
|
|
554
|
++
offer, trade_record = await wallet_client.create_offer_for_ids(
|
|
555
|
++
offer_dict,
|
|
556
|
++
driver_dict=driver_dict,
|
|
557
|
++
fee=fee,
|
|
558
|
++
tx_config=CMDTXConfigLoader(
|
|
559
|
++
reuse_puzhash=reuse_puzhash,
|
|
560
|
++
).to_tx_config(units["chia"], config, fingerprint),
|
|
567
561
|
)
|
|
568
|
--
|
|
569
|
--
|
|
562
|
++
if offer is not None:
|
|
563
|
++
file.write(offer.to_bech32())
|
|
564
|
++
print(f"Created offer with ID {trade_record.trade_id}")
|
|
565
|
++
print(
|
|
566
|
++
f"Use chia wallet get_offers --id "
|
|
567
|
++
f"{trade_record.trade_id} -f {fingerprint} to view status"
|
|
568
|
++
)
|
|
569
|
++
else:
|
|
570
|
++
print("Error creating offer")
|
|
570
571
|
|
|
571
572
|
|
|
572
573
|
def timestamp_to_time(timestamp: int) -> str:
|
|
@@@ -106,24 -70,15 +106,22 @@@ class ForkInfo
|
|
|
106
106
|
assert block.foliage_transaction_block is not None
|
|
107
107
|
timestamp = block.foliage_transaction_block.timestamp
|
|
108
108
|
for spend in npc_result.conds.spends:
|
|
109
|
-
self.removals_since_fork.
|
|
110
|
-
for puzzle_hash, amount,
|
|
109
|
+
self.removals_since_fork[bytes32(spend.coin_id)] = ForkRem(bytes32(spend.puzzle_hash), height)
|
|
110
|
+
for puzzle_hash, amount, hint in spend.create_coin:
|
|
111
111
|
coin = Coin(bytes32(spend.coin_id), bytes32(puzzle_hash), uint64(amount))
|
|
112
|
-
self.additions_since_fork[coin.name()] = ForkAdd(
|
|
113
|
-
coin, uint32(height), uint64(timestamp), hint, False
|
|
114
|
-
)
|
|
115
|
-
self.additions_since_fork[coin.name()] = (coin, height, timestamp)
|
|
112
|
++
self.additions_since_fork[coin.name()] = ForkAdd(coin, height, timestamp, hint, False)
|
|
116
113
|
for coin in block.get_included_reward_coins():
|
|
117
114
|
assert block.foliage_transaction_block is not None
|
|
118
115
|
timestamp = block.foliage_transaction_block.timestamp
|
|
119
116
|
assert coin.name() not in self.additions_since_fork
|
|
120
|
-
self.additions_since_fork[coin.name()] = ForkAdd(coin,
|
|
121
|
-
|
|
117
|
++
self.additions_since_fork[coin.name()] = ForkAdd(coin, block.height, timestamp, None, True)
|
|
118
|
+
|
|
119
|
+
def rollback(self, header_hash: bytes32, height: int) -> None:
|
|
120
|
+
assert height <= self.peak_height
|
|
121
|
+
self.peak_height = height
|
|
122
|
+
self.peak_hash = header_hash
|
|
123
|
+
self.additions_since_fork = {k: v for k, v in self.additions_since_fork.items() if v.confirmed_height <= height}
|
|
124
|
+
self.removals_since_fork = {k: v for k, v in self.removals_since_fork.items() if v.height <= height}
|
|
122
125
|
|
|
123
126
|
|
|
124
127
|
async def validate_block_body(
|
|
@@@ -48,7 -48,7 +48,7 @@@ class ConsensusConstants
|
|
|
48
48
|
# Size of mempool = 10x the size of block
|
|
49
49
|
MEMPOOL_BLOCK_BUFFER: int
|
|
50
50
|
# Max coin amount uint(1 << 64). This allows coin amounts to fit in 64 bits. This is around 18M chia.
|
|
51
|
--
MAX_COIN_AMOUNT:
|
|
51
|
++
MAX_COIN_AMOUNT: uint64
|
|
52
52
|
# Max block cost in clvm cost units
|
|
53
53
|
MAX_BLOCK_COST_CLVM: int
|
|
54
54
|
# Cost per byte of generator program
|
|
@@@ -32,7 -32,7 +32,10 @@@ from chia.data_layer.data_layer_errors
|
|
|
32
32
|
from chia.data_layer.data_layer_util import (
|
|
33
33
|
DiffData,
|
|
34
34
|
InternalNode,
|
|
35
|
++
KeysPaginationData,
|
|
36
|
++
KeysValuesPaginationData,
|
|
35
37
|
KeyValue,
|
|
38
|
++
KVDiffPaginationData,
|
|
36
39
|
Layer,
|
|
37
40
|
Offer,
|
|
38
41
|
OfferStore,
|
|
@@@ -312,14 -319,14 +315,12 @@@ class DataLayer
|
|
|
312
315
|
node = await self.data_store.get_node_by_key(tree_id=store_id, key=key, root_hash=root_hash)
|
|
313
316
|
return node.hash
|
|
314
317
|
|
|
315
|
--
async def get_value(self, store_id: bytes32, key: bytes, root_hash: Optional[bytes32] = None) ->
|
|
318
|
++
async def get_value(self, store_id: bytes32, key: bytes, root_hash: Optional[bytes32] = None) -> bytes:
|
|
316
319
|
await self._update_confirmation_status(tree_id=store_id)
|
|
317
320
|
|
|
318
321
|
async with self.data_store.transaction():
|
|
322
|
++
# this either returns the node or raises an exception
|
|
319
323
|
res = await self.data_store.get_node_by_key(tree_id=store_id, key=key, root_hash=root_hash)
|
|
320
|
--
if res is None:
|
|
321
|
--
self.log.error("Failed to fetch key")
|
|
322
|
--
return None
|
|
323
324
|
return res.value
|
|
324
325
|
|
|
325
326
|
async def get_keys_values(self, store_id: bytes32, root_hash: Optional[bytes32]) -> List[TerminalNode]:
|
|
@@@ -330,12 -337,12 +331,40 @@@
|
|
|
330
331
|
self.log.error("Failed to fetch keys values")
|
|
331
332
|
return res
|
|
332
333
|
|
|
334
|
++
async def get_keys_values_paginated(
|
|
335
|
++
self,
|
|
336
|
++
store_id: bytes32,
|
|
337
|
++
root_hash: Optional[bytes32],
|
|
338
|
++
page: int,
|
|
339
|
++
max_page_size: Optional[int] = None,
|
|
340
|
++
) -> KeysValuesPaginationData:
|
|
341
|
++
await self._update_confirmation_status(tree_id=store_id)
|
|
342
|
++
|
|
343
|
++
if max_page_size is None:
|
|
344
|
++
max_page_size = 40 * 1024 * 1024
|
|
345
|
++
res = await self.data_store.get_keys_values_paginated(store_id, page, max_page_size, root_hash)
|
|
346
|
++
return res
|
|
347
|
++
|
|
333
348
|
async def get_keys(self, store_id: bytes32, root_hash: Optional[bytes32]) -> List[bytes]:
|
|
334
349
|
await self._update_confirmation_status(tree_id=store_id)
|
|
335
350
|
|
|
336
351
|
res = await self.data_store.get_keys(store_id, root_hash)
|
|
337
352
|
return res
|
|
338
353
|
|
|
354
|
++
async def get_keys_paginated(
|
|
355
|
++
self,
|
|
356
|
++
store_id: bytes32,
|
|
357
|
++
root_hash: Optional[bytes32],
|
|
358
|
++
page: int,
|
|
359
|
++
max_page_size: Optional[int] = None,
|
|
360
|
++
) -> KeysPaginationData:
|
|
361
|
++
await self._update_confirmation_status(tree_id=store_id)
|
|
362
|
++
|
|
363
|
++
if max_page_size is None:
|
|
364
|
++
max_page_size = 40 * 1024 * 1024
|
|
365
|
++
res = await self.data_store.get_keys_paginated(store_id, page, max_page_size, root_hash)
|
|
366
|
++
return res
|
|
367
|
++
|
|
339
368
|
async def get_ancestors(self, node_hash: bytes32, store_id: bytes32) -> List[InternalNode]:
|
|
340
369
|
await self._update_confirmation_status(tree_id=store_id)
|
|
341
370
|
|
|
@@@ -635,13 -639,13 +664,14 @@@
|
|
|
635
664
|
else:
|
|
636
665
|
self.log.debug(f"uploaded to uploader {uploader}")
|
|
637
666
|
|
|
638
|
--
async def subscribe(self, store_id: bytes32, urls: List[str]) ->
|
|
667
|
++
async def subscribe(self, store_id: bytes32, urls: List[str]) -> Subscription:
|
|
639
668
|
parsed_urls = [url.rstrip("/") for url in urls]
|
|
640
669
|
subscription = Subscription(store_id, [ServerInfo(url, 0, 0) for url in parsed_urls])
|
|
641
670
|
await self.wallet_rpc.dl_track_new(subscription.tree_id)
|
|
642
671
|
async with self.subscription_lock:
|
|
643
672
|
await self.data_store.subscribe(subscription)
|
|
644
673
|
self.log.info(f"Done adding subscription: {subscription.tree_id}")
|
|
674
|
++
return subscription
|
|
645
675
|
|
|
646
676
|
async def remove_subscriptions(self, store_id: bytes32, urls: List[str]) -> None:
|
|
647
677
|
parsed_urls = [url.rstrip("/") for url in urls]
|
|
@@@ -711,6 -704,6 +741,13 @@@
|
|
|
711
741
|
async def get_kv_diff(self, tree_id: bytes32, hash_1: bytes32, hash_2: bytes32) -> Set[DiffData]:
|
|
712
742
|
return await self.data_store.get_kv_diff(tree_id, hash_1, hash_2)
|
|
713
743
|
|
|
744
|
++
async def get_kv_diff_paginated(
|
|
745
|
++
self, tree_id: bytes32, hash_1: bytes32, hash_2: bytes32, page: int, max_page_size: Optional[int] = None
|
|
746
|
++
) -> KVDiffPaginationData:
|
|
747
|
++
if max_page_size is None:
|
|
748
|
++
max_page_size = 40 * 1024 * 1024
|
|
749
|
++
return await self.data_store.get_kv_diff_paginated(tree_id, page, max_page_size, hash_1, hash_2)
|
|
750
|
++
|
|
714
751
|
async def periodically_manage_data(self) -> None:
|
|
715
752
|
manage_data_interval = self.config.get("manage_data_interval", 60)
|
|
716
753
|
while not self._shut_down:
|
|
@@@ -743,7 -736,7 +780,8 @@@
|
|
|
743
780
|
for local_id in local_tree_ids:
|
|
744
781
|
if local_id not in subscription_tree_ids:
|
|
745
782
|
try:
|
|
746
|
--
await self.subscribe(local_id, [])
|
|
783
|
++
subscription = await self.subscribe(local_id, [])
|
|
784
|
++
subscriptions.insert(0, subscription)
|
|
747
785
|
except Exception as e:
|
|
748
786
|
self.log.info(
|
|
749
787
|
f"Can't subscribe to locally stored {local_id}: {type(e)} {e} {traceback.format_exc()}"
|
|
@@@ -42,5 -42,5 +42,9 @@@ class OfferIntegrityError(Exception)
|
|
|
42
42
|
pass
|
|
43
43
|
|
|
44
44
|
|
|
45
|
++
class ProofIntegrityError(Exception):
|
|
46
|
++
pass
|
|
47
|
++
|
|
48
|
++
|
|
45
49
|
class LauncherCoinNotFoundError(Exception):
|
|
46
50
|
pass
|
|
@@@ -9,15 -9,15 +9,19 @@@ from typing import TYPE_CHECKING, Any,
|
|
|
9
9
|
import aiosqlite as aiosqlite
|
|
10
10
|
from typing_extensions import final
|
|
11
11
|
|
|
12
|
++
from chia.data_layer.data_layer_errors import ProofIntegrityError
|
|
13
|
++
from chia.server.ws_connection import WSChiaConnection
|
|
12
14
|
from chia.types.blockchain_format.program import Program
|
|
13
15
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
14
16
|
from chia.util.byte_types import hexstr_to_bytes
|
|
15
17
|
from chia.util.db_wrapper import DBWrapper2
|
|
16
|
--
from chia.util.ints import uint64
|
|
18
|
++
from chia.util.ints import uint8, uint64
|
|
17
19
|
from chia.util.streamable import Streamable, streamable
|
|
20
|
++
from chia.wallet.db_wallet.db_wallet_puzzles import create_host_fullpuz
|
|
18
21
|
|
|
19
22
|
if TYPE_CHECKING:
|
|
20
23
|
from chia.data_layer.data_store import DataStore
|
|
24
|
++
from chia.wallet.wallet_node import WalletNode
|
|
21
25
|
|
|
22
26
|
|
|
23
27
|
def internal_hash(left_hash: bytes32, right_hash: bytes32) -> bytes32:
|
|
@@@ -43,10 -43,6 +47,39 @@@ def leaf_hash(key: bytes, value: bytes
|
|
|
43
47
|
return Program.to((key, value)).get_tree_hash() # type: ignore[no-any-return]
|
|
44
48
|
|
|
45
49
|
|
|
50
|
+
def key_hash(key: bytes) -> bytes32:
|
|
51
|
+
return Program.to(key).get_tree_hash() # type: ignore[no-any-return]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
++
@dataclasses.dataclass(frozen=True)
|
|
55
|
++
class PaginationData:
|
|
56
|
++
total_pages: int
|
|
57
|
++
total_bytes: int
|
|
58
|
++
hashes: List[bytes32]
|
|
59
|
++
|
|
60
|
++
|
|
61
|
++
def get_hashes_for_page(page: int, lengths: Dict[bytes32, int], max_page_size: int) -> PaginationData:
|
|
62
|
++
current_page = 0
|
|
63
|
++
current_page_size = 0
|
|
64
|
++
total_bytes = 0
|
|
65
|
++
hashes: List[bytes32] = []
|
|
66
|
++
for hash, length in sorted(lengths.items(), key=lambda x: (-x[1], x[0])):
|
|
67
|
++
if length > max_page_size:
|
|
68
|
++
raise RuntimeError(
|
|
69
|
++
f"Cannot paginate data, item size is larger than max page size: {length} {max_page_size}"
|
|
70
|
++
)
|
|
71
|
++
total_bytes += length
|
|
72
|
++
if current_page_size + length <= max_page_size:
|
|
73
|
++
current_page_size += length
|
|
74
|
++
else:
|
|
75
|
++
current_page += 1
|
|
76
|
++
current_page_size = length
|
|
77
|
++
if current_page == page:
|
|
78
|
++
hashes.append(hash)
|
|
79
|
++
|
|
80
|
++
return PaginationData(current_page + 1, total_bytes, hashes)
|
|
81
|
++
|
|
82
|
++
|
|
46
83
|
async def _debug_dump(db: DBWrapper2, description: str = "") -> None:
|
|
47
84
|
async with db.reader() as reader:
|
|
48
85
|
cursor = await reader.execute("SELECT name FROM sqlite_master WHERE type='table';")
|
|
@@@ -729,9 -725,3 +762,194 @@@ class PluginStatus
|
|
|
729
762
|
class InsertResult:
|
|
730
763
|
node_hash: bytes32
|
|
731
764
|
root: Root
|
|
765
|
+
|
|
766
|
+
|
|
767
|
+
@dataclasses.dataclass(frozen=True)
|
|
768
|
+
class UnsubscribeData:
|
|
769
|
+
tree_id: bytes32
|
|
770
|
+
retain_data: bool
|
|
771
|
++
|
|
772
|
++
|
|
773
|
++
@dataclasses.dataclass(frozen=True)
|
|
774
|
++
class KeysValuesCompressed:
|
|
775
|
++
keys_values_hashed: Dict[bytes32, bytes32]
|
|
776
|
++
key_hash_to_length: Dict[bytes32, int]
|
|
777
|
++
leaf_hash_to_length: Dict[bytes32, int]
|
|
778
|
++
root_hash: Optional[bytes32]
|
|
779
|
++
|
|
780
|
++
|
|
781
|
++
@dataclasses.dataclass(frozen=True)
|
|
782
|
++
class KeysPaginationData:
|
|
783
|
++
total_pages: int
|
|
784
|
++
total_bytes: int
|
|
785
|
++
keys: List[bytes]
|
|
786
|
++
root_hash: Optional[bytes32]
|
|
787
|
++
|
|
788
|
++
|
|
789
|
++
@dataclasses.dataclass(frozen=True)
|
|
790
|
++
class KeysValuesPaginationData:
|
|
791
|
++
total_pages: int
|
|
792
|
++
total_bytes: int
|
|
793
|
++
keys_values: List[TerminalNode]
|
|
794
|
++
root_hash: Optional[bytes32]
|
|
795
|
++
|
|
796
|
++
|
|
797
|
++
@dataclasses.dataclass(frozen=True)
|
|
798
|
++
class KVDiffPaginationData:
|
|
799
|
++
total_pages: int
|
|
800
|
++
total_bytes: int
|
|
801
|
++
kv_diff: List[DiffData]
|
|
802
|
++
|
|
803
|
++
|
|
804
|
++
#
|
|
805
|
++
# GetProof and VerifyProof support classes
|
|
806
|
++
#
|
|
807
|
++
@streamable
|
|
808
|
++
@dataclasses.dataclass(frozen=True)
|
|
809
|
++
class ProofLayer(Streamable):
|
|
810
|
++
# This class is basically Layer but streamable
|
|
811
|
++
other_hash_side: uint8
|
|
812
|
++
other_hash: bytes32
|
|
813
|
++
combined_hash: bytes32
|
|
814
|
++
|
|
815
|
++
|
|
816
|
++
@streamable
|
|
817
|
++
@dataclasses.dataclass(frozen=True)
|
|
818
|
++
class HashOnlyProof(Streamable):
|
|
819
|
++
key_clvm_hash: bytes32
|
|
820
|
++
value_clvm_hash: bytes32
|
|
821
|
++
node_hash: bytes32
|
|
822
|
++
layers: List[ProofLayer]
|
|
823
|
++
|
|
824
|
++
def root(self) -> bytes32:
|
|
825
|
++
if len(self.layers) == 0:
|
|
826
|
++
return self.node_hash
|
|
827
|
++
return self.layers[-1].combined_hash
|
|
828
|
++
|
|
829
|
++
@classmethod
|
|
830
|
++
def from_key_value(cls, key: bytes, value: bytes, node_hash: bytes32, layers: List[ProofLayer]) -> HashOnlyProof:
|
|
831
|
++
return cls(
|
|
832
|
++
key_clvm_hash=Program.to(key).get_tree_hash(),
|
|
833
|
++
value_clvm_hash=Program.to(value).get_tree_hash(),
|
|
834
|
++
node_hash=node_hash,
|
|
835
|
++
layers=layers,
|
|
836
|
++
)
|
|
837
|
++
|
|
838
|
++
|
|
839
|
++
@streamable
|
|
840
|
++
@dataclasses.dataclass(frozen=True)
|
|
841
|
++
class KeyValueHashes(Streamable):
|
|
842
|
++
key_clvm_hash: bytes32
|
|
843
|
++
value_clvm_hash: bytes32
|
|
844
|
++
|
|
845
|
++
|
|
846
|
++
@streamable
|
|
847
|
++
@dataclasses.dataclass(frozen=True)
|
|
848
|
++
class ProofResultInclusions(Streamable):
|
|
849
|
++
store_id: bytes32
|
|
850
|
++
inclusions: List[KeyValueHashes]
|
|
851
|
++
|
|
852
|
++
|
|
853
|
++
@streamable
|
|
854
|
++
@dataclasses.dataclass(frozen=True)
|
|
855
|
++
class GetProofRequest(Streamable):
|
|
856
|
++
store_id: bytes32
|
|
857
|
++
keys: List[bytes]
|
|
858
|
++
|
|
859
|
++
|
|
860
|
++
@streamable
|
|
861
|
++
@dataclasses.dataclass(frozen=True)
|
|
862
|
++
class StoreProofsHashes(Streamable):
|
|
863
|
++
store_id: bytes32
|
|
864
|
++
proofs: List[HashOnlyProof]
|
|
865
|
++
|
|
866
|
++
|
|
867
|
++
@streamable
|
|
868
|
++
@dataclasses.dataclass(frozen=True)
|
|
869
|
++
class DLProof(Streamable):
|
|
870
|
++
store_proofs: StoreProofsHashes
|
|
871
|
++
coin_id: bytes32
|
|
872
|
++
inner_puzzle_hash: bytes32
|
|
873
|
++
|
|
874
|
++
|
|
875
|
++
@streamable
|
|
876
|
++
@dataclasses.dataclass(frozen=True)
|
|
877
|
++
class GetProofResponse(Streamable):
|
|
878
|
++
proof: DLProof
|
|
879
|
++
success: bool
|
|
880
|
++
|
|
881
|
++
|
|
882
|
++
@streamable
|
|
883
|
++
@dataclasses.dataclass(frozen=True)
|
|
884
|
++
class VerifyProofResponse(Streamable):
|
|
885
|
++
verified_clvm_hashes: ProofResultInclusions
|
|
886
|
++
current_root: bool
|
|
887
|
++
success: bool
|
|
888
|
++
|
|
889
|
++
|
|
890
|
++
def dl_verify_proof_internal(dl_proof: DLProof, puzzle_hash: bytes32) -> List[KeyValueHashes]:
|
|
891
|
++
"""Verify a proof of inclusion for a DL singleton"""
|
|
892
|
++
|
|
893
|
++
verified_keys: List[KeyValueHashes] = []
|
|
894
|
++
|
|
895
|
++
for reference_proof in dl_proof.store_proofs.proofs:
|
|
896
|
++
inner_puz_hash = dl_proof.inner_puzzle_hash
|
|
897
|
++
host_fullpuz_program = create_host_fullpuz(
|
|
898
|
++
inner_puz_hash, reference_proof.root(), dl_proof.store_proofs.store_id
|
|
899
|
++
)
|
|
900
|
++
expected_puzzle_hash = host_fullpuz_program.get_tree_hash_precalc(inner_puz_hash)
|
|
901
|
++
|
|
902
|
++
if puzzle_hash != expected_puzzle_hash:
|
|
903
|
++
raise ProofIntegrityError(
|
|
904
|
++
"Invalid Proof: incorrect puzzle hash: expected:"
|
|
905
|
++
f"{expected_puzzle_hash.hex()} received: {puzzle_hash.hex()}"
|
|
906
|
++
)
|
|
907
|
++
|
|
908
|
++
proof = ProofOfInclusion(
|
|
909
|
++
node_hash=reference_proof.node_hash,
|
|
910
|
++
layers=[
|
|
911
|
++
ProofOfInclusionLayer(
|
|
912
|
++
other_hash_side=Side(layer.other_hash_side),
|
|
913
|
++
other_hash=layer.other_hash,
|
|
914
|
++
combined_hash=layer.combined_hash,
|
|
915
|
++
)
|
|
916
|
++
for layer in reference_proof.layers
|
|
917
|
++
],
|
|
918
|
++
)
|
|
919
|
++
|
|
920
|
++
leaf_hash = internal_hash(left_hash=reference_proof.key_clvm_hash, right_hash=reference_proof.value_clvm_hash)
|
|
921
|
++
if leaf_hash != proof.node_hash:
|
|
922
|
++
raise ProofIntegrityError("Invalid Proof: node hash does not match key and value")
|
|
923
|
++
|
|
924
|
++
if not proof.valid():
|
|
925
|
++
raise ProofIntegrityError("Invalid Proof: invalid proof of inclusion found")
|
|
926
|
++
|
|
927
|
++
verified_keys.append(
|
|
928
|
++
KeyValueHashes(key_clvm_hash=reference_proof.key_clvm_hash, value_clvm_hash=reference_proof.value_clvm_hash)
|
|
929
|
++
)
|
|
930
|
++
|
|
931
|
++
return verified_keys
|
|
932
|
++
|
|
933
|
++
|
|
934
|
++
async def dl_verify_proof(
|
|
935
|
++
request: Dict[str, Any],
|
|
936
|
++
wallet_node: WalletNode,
|
|
937
|
++
peer: WSChiaConnection,
|
|
938
|
++
) -> Dict[str, Any]:
|
|
939
|
++
"""Verify a proof of inclusion for a DL singleton"""
|
|
940
|
++
|
|
941
|
++
dlproof = DLProof.from_json_dict(request)
|
|
942
|
++
|
|
943
|
++
coin_id = dlproof.coin_id
|
|
944
|
++
coin_states = await wallet_node.get_coin_state([coin_id], peer=peer)
|
|
945
|
++
if len(coin_states) == 0:
|
|
946
|
++
raise ProofIntegrityError(f"Invalid Proof: No DL singleton found at coin id: {coin_id.hex()}")
|
|
947
|
++
|
|
948
|
++
verified_keys = dl_verify_proof_internal(dlproof, coin_states[0].coin.puzzle_hash)
|
|
949
|
++
|
|
950
|
++
response = VerifyProofResponse(
|
|
951
|
++
verified_clvm_hashes=ProofResultInclusions(dlproof.store_proofs.store_id, verified_keys),
|
|
952
|
++
success=True,
|
|
953
|
++
current_root=coin_states[0].spent_height is None,
|
|
954
|
++
)
|
|
955
|
++
return response.to_json_dict()
|
|
@@@ -859,7 -864,7 +859,7 @@@ class DataLayerWallet
|
|
|
859
859
|
puzzle = parent_spend.puzzle_reveal
|
|
860
860
|
solution = parent_spend.solution
|
|
861
861
|
|
|
862
|
--
matched, _ = match_dl_singleton(puzzle
|
|
862
|
++
matched, _ = match_dl_singleton(puzzle)
|
|
863
863
|
if matched:
|
|
864
864
|
self.log.info(f"DL singleton removed: {parent_spend.coin}")
|
|
865
865
|
singleton_record: Optional[SingletonRecord] = await self.wallet_state_manager.dl_store.get_singleton_record(
|
|
@@@ -1184,7 -1193,7 +1184,7 @@@
|
|
|
1184
1184
|
|
|
1185
1185
|
assert txs[0].spend_bundle is not None
|
|
1186
1186
|
dl_spend: CoinSpend = next(
|
|
1187
|
--
cs for cs in txs[0].spend_bundle.coin_spends if match_dl_singleton(cs.puzzle_reveal
|
|
1187
|
++
cs for cs in txs[0].spend_bundle.coin_spends if match_dl_singleton(cs.puzzle_reveal)[0]
|
|
1188
1188
|
)
|
|
1189
1189
|
all_other_spends: List[CoinSpend] = [cs for cs in txs[0].spend_bundle.coin_spends if cs != dl_spend]
|
|
1190
1190
|
dl_solution: Program = dl_spend.solution.to_program()
|
|
@@@ -1226,18 -1240,18 +1226,18 @@@
|
|
|
1226
1226
|
singleton_to_root: Dict[bytes32, bytes32] = {}
|
|
1227
1227
|
all_parent_ids: List[bytes32] = [cs.coin.parent_coin_info for cs in offer.coin_spends()]
|
|
1228
1228
|
for spend in offer.coin_spends():
|
|
1229
|
--
matched, curried_args = match_dl_singleton(spend.puzzle_reveal
|
|
1229
|
++
matched, curried_args = match_dl_singleton(spend.puzzle_reveal)
|
|
1230
1230
|
if matched and spend.coin.name() not in all_parent_ids:
|
|
1231
|
-
innerpuz,
|
|
1231
|
+
innerpuz, root_prg, launcher_id = curried_args
|
|
1232
1232
|
singleton_struct = launcher_to_struct(bytes32(launcher_id.as_python())).get_tree_hash()
|
|
1233
|
-
singleton_to_root[singleton_struct] = bytes32(
|
|
1233
|
+
singleton_to_root[singleton_struct] = bytes32(root_prg.as_python())
|
|
1234
1234
|
singleton_to_innerpuzhash[singleton_struct] = innerpuz.get_tree_hash()
|
|
1235
1235
|
|
|
1236
1236
|
# Create all of the new solutions
|
|
1237
1237
|
new_spends: List[CoinSpend] = []
|
|
1238
1238
|
for spend in offer.coin_spends():
|
|
1239
1239
|
solution = spend.solution.to_program()
|
|
1240
|
--
if match_dl_singleton(spend.puzzle_reveal
|
|
1240
|
++
if match_dl_singleton(spend.puzzle_reveal)[0]:
|
|
1241
1241
|
try:
|
|
1242
1242
|
graftroot: Program = solution.at("rrffrf")
|
|
1243
1243
|
except EvalError:
|
|
@@@ -1295,7 -1312,7 +1295,7 @@@
|
|
|
1295
1295
|
summary: Dict[str, Any] = {"offered": []}
|
|
1296
1296
|
for spend in offer.coin_spends():
|
|
1297
1297
|
solution = spend.solution.to_program()
|
|
1298
|
--
matched, curried_args = match_dl_singleton(spend.puzzle_reveal
|
|
1298
|
++
matched, curried_args = match_dl_singleton(spend.puzzle_reveal)
|
|
1299
1299
|
if matched:
|
|
1300
1300
|
try:
|
|
1301
1301
|
graftroot: Program = solution.at("rrffrf")
|
|
@@@ -1306,7 -1323,7 +1306,7 @@@
|
|
|
1306
1306
|
child_spend: CoinSpend = next(
|
|
1307
1307
|
cs for cs in offer.coin_spends() if cs.coin.parent_coin_info == spend.coin.name()
|
|
1308
1308
|
)
|
|
1309
|
--
_, child_curried_args = match_dl_singleton(child_spend.puzzle_reveal
|
|
1309
|
++
_, child_curried_args = match_dl_singleton(child_spend.puzzle_reveal)
|
|
1310
1310
|
singleton_summary = {
|
|
1311
1311
|
"launcher_id": list(curried_args)[2].as_python().hex(),
|
|
1312
1312
|
"new_root": list(child_curried_args)[1].as_python().hex(),
|
|
@@@ -15,6 -14,6 +15,10 @@@ from chia.data_layer.data_layer_util im
|
|
|
15
15
|
DiffData,
|
|
16
16
|
InsertResult,
|
|
17
17
|
InternalNode,
|
|
18
|
++
KeysPaginationData,
|
|
19
|
++
KeysValuesCompressed,
|
|
20
|
++
KeysValuesPaginationData,
|
|
21
|
++
KVDiffPaginationData,
|
|
18
22
|
Node,
|
|
19
23
|
NodeType,
|
|
20
24
|
OperationType,
|
|
@@@ -27,8 -26,7 +31,9 @@@
|
|
|
27
31
|
Status,
|
|
28
32
|
Subscription,
|
|
29
33
|
TerminalNode,
|
|
34
|
++
get_hashes_for_page,
|
|
30
35
|
internal_hash,
|
|
36
|
+
key_hash,
|
|
31
37
|
leaf_hash,
|
|
32
38
|
row_to_node,
|
|
33
39
|
)
|
|
@@@ -728,26 -723,6 +733,106 @@@ class DataStore
|
|
|
728
733
|
|
|
729
734
|
return terminal_nodes
|
|
730
735
|
|
|
736
|
+
async def get_keys_values_compressed(
|
|
737
|
+
self, tree_id: bytes32, root_hash: Optional[bytes32] = None
|
|
731
|
-
) ->
|
|
738
|
++
) -> KeysValuesCompressed:
|
|
739
|
+
async with self.db_wrapper.reader() as reader:
|
|
740
|
+
if root_hash is None:
|
|
741
|
+
root = await self.get_tree_root(tree_id=tree_id)
|
|
742
|
+
root_hash = root.node_hash
|
|
743
|
+
|
|
744
|
+
cursor = await self.get_keys_values_cursor(reader, root_hash)
|
|
732
|
-
|
|
745
|
++
keys_values_hashed: Dict[bytes32, bytes32] = {}
|
|
746
|
++
key_hash_to_length: Dict[bytes32, int] = {}
|
|
747
|
++
leaf_hash_to_length: Dict[bytes32, int] = {}
|
|
748
|
+
async for row in cursor:
|
|
749
|
+
if row["depth"] > 62:
|
|
750
|
+
raise Exception("Tree depth exceeded 62, unable to guarantee left-to-right node order.")
|
|
751
|
+
node = row_to_node(row=row)
|
|
752
|
+
if not isinstance(node, TerminalNode):
|
|
753
|
+
raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
|
|
733
|
-
|
|
754
|
++
keys_values_hashed[key_hash(node.key)] = leaf_hash(node.key, node.value)
|
|
755
|
++
key_hash_to_length[key_hash(node.key)] = len(node.key)
|
|
756
|
++
leaf_hash_to_length[leaf_hash(node.key, node.value)] = len(node.key) + len(node.value)
|
|
757
|
++
|
|
758
|
++
return KeysValuesCompressed(keys_values_hashed, key_hash_to_length, leaf_hash_to_length, root_hash)
|
|
759
|
++
|
|
760
|
++
async def get_keys_paginated(
|
|
761
|
++
self, tree_id: bytes32, page: int, max_page_size: int, root_hash: Optional[bytes32] = None
|
|
762
|
++
) -> KeysPaginationData:
|
|
763
|
++
keys_values_compressed = await self.get_keys_values_compressed(tree_id, root_hash)
|
|
764
|
++
pagination_data = get_hashes_for_page(page, keys_values_compressed.key_hash_to_length, max_page_size)
|
|
765
|
++
|
|
766
|
++
keys: List[bytes] = []
|
|
767
|
++
for hash in pagination_data.hashes:
|
|
768
|
++
leaf_hash = keys_values_compressed.keys_values_hashed[hash]
|
|
769
|
++
node = await self.get_node(leaf_hash)
|
|
770
|
++
assert isinstance(node, TerminalNode)
|
|
771
|
++
keys.append(node.key)
|
|
772
|
++
|
|
773
|
++
return KeysPaginationData(
|
|
774
|
++
pagination_data.total_pages,
|
|
775
|
++
pagination_data.total_bytes,
|
|
776
|
++
keys,
|
|
777
|
++
keys_values_compressed.root_hash,
|
|
778
|
++
)
|
|
779
|
++
|
|
780
|
++
async def get_keys_values_paginated(
|
|
781
|
++
self, tree_id: bytes32, page: int, max_page_size: int, root_hash: Optional[bytes32] = None
|
|
782
|
++
) -> KeysValuesPaginationData:
|
|
783
|
++
keys_values_compressed = await self.get_keys_values_compressed(tree_id, root_hash)
|
|
784
|
++
pagination_data = get_hashes_for_page(page, keys_values_compressed.leaf_hash_to_length, max_page_size)
|
|
785
|
++
|
|
786
|
++
keys_values: List[TerminalNode] = []
|
|
787
|
++
for hash in pagination_data.hashes:
|
|
788
|
++
node = await self.get_node(hash)
|
|
789
|
++
assert isinstance(node, TerminalNode)
|
|
790
|
++
keys_values.append(node)
|
|
791
|
++
|
|
792
|
++
return KeysValuesPaginationData(
|
|
793
|
++
pagination_data.total_pages,
|
|
794
|
++
pagination_data.total_bytes,
|
|
795
|
++
keys_values,
|
|
796
|
++
keys_values_compressed.root_hash,
|
|
797
|
++
)
|
|
798
|
+
|
|
734
|
-
|
|
799
|
++
async def get_kv_diff_paginated(
|
|
800
|
++
self, tree_id: bytes32, page: int, max_page_size: int, hash1: bytes32, hash2: bytes32
|
|
801
|
++
) -> KVDiffPaginationData:
|
|
802
|
++
old_pairs = await self.get_keys_values_compressed(tree_id, hash1)
|
|
803
|
++
new_pairs = await self.get_keys_values_compressed(tree_id, hash2)
|
|
804
|
++
if len(old_pairs.keys_values_hashed) == 0 and hash1 != bytes32([0] * 32):
|
|
805
|
++
return KVDiffPaginationData(1, 0, [])
|
|
806
|
++
if len(new_pairs.keys_values_hashed) == 0 and hash2 != bytes32([0] * 32):
|
|
807
|
++
return KVDiffPaginationData(1, 0, [])
|
|
808
|
++
|
|
809
|
++
old_pairs_leaf_hashes = {v for v in old_pairs.keys_values_hashed.values()}
|
|
810
|
++
new_pairs_leaf_hashes = {v for v in new_pairs.keys_values_hashed.values()}
|
|
811
|
++
insertions = {k for k in new_pairs_leaf_hashes if k not in old_pairs_leaf_hashes}
|
|
812
|
++
deletions = {k for k in old_pairs_leaf_hashes if k not in new_pairs_leaf_hashes}
|
|
813
|
++
lengths = {}
|
|
814
|
++
for hash in insertions:
|
|
815
|
++
lengths[hash] = new_pairs.leaf_hash_to_length[hash]
|
|
816
|
++
for hash in deletions:
|
|
817
|
++
lengths[hash] = old_pairs.leaf_hash_to_length[hash]
|
|
818
|
++
|
|
819
|
++
pagination_data = get_hashes_for_page(page, lengths, max_page_size)
|
|
820
|
++
kv_diff: List[DiffData] = []
|
|
821
|
++
|
|
822
|
++
for hash in pagination_data.hashes:
|
|
823
|
++
node = await self.get_node(hash)
|
|
824
|
++
assert isinstance(node, TerminalNode)
|
|
825
|
++
if hash in insertions:
|
|
826
|
++
kv_diff.append(DiffData(OperationType.INSERT, node.key, node.value))
|
|
827
|
++
else:
|
|
828
|
++
kv_diff.append(DiffData(OperationType.DELETE, node.key, node.value))
|
|
829
|
++
|
|
830
|
++
return KVDiffPaginationData(
|
|
831
|
++
pagination_data.total_pages,
|
|
832
|
++
pagination_data.total_bytes,
|
|
833
|
++
kv_diff,
|
|
834
|
++
)
|
|
835
|
+
|
|
735
836
|
async def get_node_type(self, node_hash: bytes32) -> NodeType:
|
|
736
837
|
async with self.db_wrapper.reader() as reader:
|
|
737
838
|
cursor = await reader.execute(
|
|
@@@ -1258,7 -1085,7 +1343,8 @@@
|
|
|
1258
1343
|
if old_root.node_hash is None:
|
|
1259
1344
|
hint_keys_values = {}
|
|
1260
1345
|
else:
|
|
1261
|
-
|
|
1262
|
-
hint_keys_values =
|
|
1346
|
++
kv_compressed = await self.get_keys_values_compressed(tree_id, root_hash=root_hash)
|
|
1347
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
1263
1348
|
|
|
1264
1349
|
intermediate_root: Optional[Root] = old_root
|
|
1265
1350
|
for change in changelist:
|
|
@@@ -588,33 -495,3 +588,33 @@@ class CoinStore
|
|
|
588
588
|
raise ValueError(
|
|
589
589
|
f"Invalid operation to set spent, total updates {rows_updated} expected {len(coin_names)}"
|
|
590
590
|
)
|
|
591
|
+
|
|
592
|
+
# Lookup the most recent unspent lineage that matches a puzzle hash
|
|
593
|
+
async def get_unspent_lineage_info_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[UnspentLineageInfo]:
|
|
594
|
+
async with self.db_wrapper.reader_no_transaction() as conn:
|
|
595
|
+
async with conn.execute(
|
|
596
|
+
"SELECT unspent.coin_name, "
|
|
597
|
+
"unspent.amount, "
|
|
598
|
+
"unspent.coin_parent, "
|
|
599
|
+
"parent.amount, "
|
|
600
|
+
"parent.coin_parent "
|
|
591
|
-
"FROM coin_record AS unspent "
|
|
601
|
++
"FROM coin_record AS unspent INDEXED BY coin_puzzle_hash "
|
|
602
|
+
"LEFT JOIN coin_record AS parent ON unspent.coin_parent = parent.coin_name "
|
|
603
|
+
"WHERE unspent.spent_index = 0 "
|
|
604
|
+
"AND parent.spent_index > 0 "
|
|
605
|
+
"AND unspent.puzzle_hash = ? "
|
|
606
|
+
"AND parent.puzzle_hash = unspent.puzzle_hash",
|
|
607
|
+
(puzzle_hash,),
|
|
608
|
+
) as cursor:
|
|
609
|
+
rows = list(await cursor.fetchall())
|
|
610
|
+
if len(rows) != 1:
|
|
611
|
+
log.debug("Expected 1 unspent with puzzle hash %s, but found %s", puzzle_hash.hex(), len(rows))
|
|
612
|
+
return None
|
|
613
|
+
coin_id, coin_amount, parent_id, parent_amount, parent_parent_id = rows[0]
|
|
614
|
+
return UnspentLineageInfo(
|
|
615
|
+
coin_id=bytes32(coin_id),
|
|
616
|
+
coin_amount=int_from_bytes(coin_amount),
|
|
617
|
+
parent_id=bytes32(parent_id),
|
|
618
|
+
parent_amount=int_from_bytes(parent_amount),
|
|
619
|
+
parent_parent_id=bytes32(parent_parent_id),
|
|
620
|
+
)
|
|
@@@ -26,20 -25,6 +26,20 @@@ from chia.util.misc import to_batche
|
|
|
26
26
|
|
|
27
27
|
log = logging.getLogger(__name__)
|
|
28
28
|
|
|
29
|
+
# Maximum number of mempool items that can be skipped (not considered) during
|
|
30
|
+
# the creation of a block bundle. An item is skipped if it won't fit in the
|
|
31
|
+
# block we're trying to create.
|
|
29
|
-
MAX_SKIPPED_ITEMS =
|
|
32
|
++
MAX_SKIPPED_ITEMS = 10
|
|
33
|
+
|
|
34
|
+
# Threshold after which we stop including mempool items with eligible spends
|
|
35
|
+
# during the creation of a block bundle. We do that to avoid spending too much
|
|
36
|
+
# time on potentially expensive items.
|
|
37
|
+
PRIORITY_TX_THRESHOLD = 3
|
|
38
|
+
|
|
39
|
+
# Typical cost of a standard XCH spend. It's used as a heuristic to help
|
|
40
|
+
# determine how close to the block size limit we're willing to go.
|
|
41
|
+
MIN_COST_THRESHOLD = 6_000_000
|
|
42
|
+
|
|
30
43
|
# We impose a limit on the fee a single transaction can pay in order to have the
|
|
31
44
|
# sum of all fees in the mempool be less than 2^63. That's the limit of sqlite's
|
|
32
45
|
# integers, which we rely on for computing fee per cost as well as the fee sum
|
|
@@@ -168,7 -156,6 +168,9 @@@ class MempoolManager
|
|
|
168
168
|
seen_cache_size: int
|
|
169
169
|
peak: Optional[BlockRecordProtocol]
|
|
170
170
|
mempool: Mempool
|
|
171
|
+
_worker_queue_size: int
|
|
172
|
++
max_block_clvm_cost: uint64
|
|
173
|
++
max_tx_clvm_cost: uint64
|
|
171
174
|
|
|
172
175
|
def __init__(
|
|
173
176
|
self,
|
|
@@@ -177,6 -164,6 +179,7 @@@
|
|
|
177
179
|
multiprocessing_context: Optional[BaseContext] = None,
|
|
178
180
|
*,
|
|
179
181
|
single_threaded: bool = False,
|
|
182
|
++
max_tx_clvm_cost: Optional[uint64] = None,
|
|
180
183
|
):
|
|
181
184
|
self.constants: ConsensusConstants = consensus_constants
|
|
182
185
|
|
|
@@@ -190,8 -177,8 +193,11 @@@
|
|
|
190
193
|
# spends.
|
|
191
194
|
self.nonzero_fee_minimum_fpc = 5
|
|
192
195
|
|
|
193
|
--
BLOCK_SIZE_LIMIT_FACTOR = 0.
|
|
196
|
++
BLOCK_SIZE_LIMIT_FACTOR = 0.6
|
|
194
197
|
self.max_block_clvm_cost = uint64(self.constants.MAX_BLOCK_COST_CLVM * BLOCK_SIZE_LIMIT_FACTOR)
|
|
198
|
++
self.max_tx_clvm_cost = (
|
|
199
|
++
max_tx_clvm_cost if max_tx_clvm_cost is not None else uint64(self.constants.MAX_BLOCK_COST_CLVM // 2)
|
|
200
|
++
)
|
|
195
201
|
self.mempool_max_total_cost = int(self.constants.MAX_BLOCK_COST_CLVM * self.constants.MEMPOOL_BLOCK_BUFFER)
|
|
196
202
|
|
|
197
203
|
# Transactions that were unable to enter mempool, used for retry. (they were invalid)
|
|
@@@ -300,18 -280,14 +306,18 @@@
|
|
|
300
306
|
|
|
301
307
|
assert self.peak is not None
|
|
302
308
|
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
|
|
308
|
-
|
|
309
|
-
self.peak.height,
|
|
310
|
-
)
|
|
309
|
+
self._worker_queue_size += 1
|
|
310
|
+
try:
|
|
311
|
+
err, cached_result_bytes, new_cache_entries, duration = await asyncio.get_running_loop().run_in_executor(
|
|
312
|
+
self.pool,
|
|
313
|
+
validate_clvm_and_signature,
|
|
314
|
+
new_spend_bytes,
|
|
311
|
-
self.
|
|
315
|
++
self.max_tx_clvm_cost,
|
|
316
|
+
self.constants,
|
|
317
|
+
self.peak.height,
|
|
318
|
+
)
|
|
319
|
+
finally:
|
|
320
|
+
self._worker_queue_size -= 1
|
|
312
321
|
|
|
313
322
|
if err is not None:
|
|
314
323
|
raise ValidationError(err)
|
|
@@@ -434,7 -398,7 +440,7 @@@
|
|
|
434
440
|
removal_names.add(coin_id)
|
|
435
441
|
spend_additions = []
|
|
436
442
|
for puzzle_hash, amount, _ in spend.create_coin:
|
|
437
|
--
child_coin = Coin(coin_id, puzzle_hash, amount)
|
|
443
|
++
child_coin = Coin(coin_id, puzzle_hash, uint64(amount))
|
|
438
444
|
spend_additions.append(child_coin)
|
|
439
445
|
additions_dict[child_coin.name()] = child_coin
|
|
440
446
|
addition_amount = addition_amount + child_coin.amount
|
|
@@@ -513,7 -450,7 +519,7 @@@
|
|
|
513
519
|
if cost == 0:
|
|
514
520
|
return Err.UNKNOWN, None, []
|
|
515
521
|
|
|
516
|
--
if cost > self.
|
|
522
|
++
if cost > self.max_tx_clvm_cost:
|
|
517
523
|
return Err.BLOCK_COST_EXCEEDS_MAX, None, []
|
|
518
524
|
|
|
519
525
|
# this is not very likely to happen, but it's here to ensure SQLite
|
|
@@@ -707,10 -707,10 +707,10 @@@ def _create_sub_epoch_data
|
|
|
707
707
|
) -> SubEpochData:
|
|
708
708
|
reward_chain_hash: bytes32 = sub_epoch_summary.reward_chain_hash
|
|
709
709
|
# Number of subblocks overflow in previous slot
|
|
710
|
-
previous_sub_epoch_overflows =
|
|
711
|
-
previous_sub_epoch_overflows: uint8 = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected
|
|
710
|
++
previous_sub_epoch_overflows = sub_epoch_summary.num_blocks_overflow # total in sub epoch - expected
|
|
712
711
|
# New work difficulty and iterations per sub-slot
|
|
713
|
-
sub_slot_iters
|
|
714
|
-
new_difficulty
|
|
715
|
-
sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
|
|
716
|
-
new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty
|
|
712
|
++
sub_slot_iters = sub_epoch_summary.new_sub_slot_iters
|
|
713
|
++
new_difficulty = sub_epoch_summary.new_difficulty
|
|
717
714
|
return SubEpochData(reward_chain_hash, previous_sub_epoch_overflows, sub_slot_iters, new_difficulty)
|
|
718
715
|
|
|
719
716
|
|
|
@@@ -1558,8 -1558,8 +1558,8 @@@ def get_sp_total_iters
|
|
|
1558
1558
|
assert sub_slot_data.cc_ip_vdf_info is not None
|
|
1559
1559
|
assert sub_slot_data.total_iters is not None
|
|
1560
1560
|
assert sub_slot_data.signage_point_index is not None
|
|
1561
|
-
sp_iters
|
|
1562
|
-
ip_iters
|
|
1563
|
-
sp_iters: uint64 = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
|
|
1564
|
-
ip_iters: uint64 = sub_slot_data.cc_ip_vdf_info.number_of_iterations
|
|
1561
|
++
sp_iters = calculate_sp_iters(constants, ssi, sub_slot_data.signage_point_index)
|
|
1562
|
++
ip_iters = sub_slot_data.cc_ip_vdf_info.number_of_iterations
|
|
1565
1563
|
sp_sub_slot_total_iters = uint128(sub_slot_data.total_iters - ip_iters)
|
|
1566
1564
|
if is_overflow:
|
|
1567
1565
|
sp_sub_slot_total_iters = uint128(sp_sub_slot_total_iters - ssi)
|
|
@@@ -10,22 -10,22 +10,30 @@@ from chia.data_layer.data_layer_util im
|
|
|
10
10
|
CancelOfferResponse,
|
|
11
11
|
ClearPendingRootsRequest,
|
|
12
12
|
ClearPendingRootsResponse,
|
|
13
|
++
DLProof,
|
|
14
|
++
GetProofRequest,
|
|
15
|
++
GetProofResponse,
|
|
16
|
++
HashOnlyProof,
|
|
13
17
|
MakeOfferRequest,
|
|
14
18
|
MakeOfferResponse,
|
|
19
|
++
ProofLayer,
|
|
15
20
|
Side,
|
|
21
|
++
StoreProofsHashes,
|
|
16
22
|
Subscription,
|
|
17
23
|
TakeOfferRequest,
|
|
18
24
|
TakeOfferResponse,
|
|
19
25
|
VerifyOfferResponse,
|
|
26
|
++
VerifyProofResponse,
|
|
20
27
|
)
|
|
21
28
|
from chia.data_layer.data_layer_wallet import DataLayerWallet, Mirror, verify_offer
|
|
22
29
|
from chia.rpc.data_layer_rpc_util import marshal
|
|
23
30
|
from chia.rpc.rpc_server import Endpoint, EndpointResult
|
|
31
|
++
from chia.rpc.util import marshal as streamable_marshal
|
|
24
32
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
25
33
|
from chia.util.byte_types import hexstr_to_bytes
|
|
26
34
|
|
|
27
35
|
# todo input assertions for all rpc's
|
|
28
|
--
from chia.util.ints import uint64
|
|
36
|
++
from chia.util.ints import uint8, uint64
|
|
29
37
|
from chia.util.streamable import recurse_jsonify
|
|
30
38
|
from chia.util.ws_message import WsRpcMessage
|
|
31
39
|
from chia.wallet.trading.offer import Offer as TradingOffer
|
|
@@@ -104,6 -104,6 +112,8 @@@ class DataLayerRpcApi
|
|
|
104
112
|
"/get_sync_status": self.get_sync_status,
|
|
105
113
|
"/check_plugins": self.check_plugins,
|
|
106
114
|
"/clear_pending_roots": self.clear_pending_roots,
|
|
115
|
++
"/get_proof": self.get_proof,
|
|
116
|
++
"/verify_proof": self.verify_proof,
|
|
107
117
|
}
|
|
108
118
|
|
|
109
119
|
async def _state_changed(self, change: str, change_data: Optional[Dict[str, Any]]) -> List[WsRpcMessage]:
|
|
@@@ -150,30 -150,30 +160,69 @@@
|
|
|
150
160
|
async def get_keys(self, request: Dict[str, Any]) -> EndpointResult:
|
|
151
161
|
store_id = bytes32.from_hexstr(request["id"])
|
|
152
162
|
root_hash = request.get("root_hash")
|
|
163
|
++
page = request.get("page", None)
|
|
164
|
++
max_page_size = request.get("max_page_size", None)
|
|
153
165
|
if root_hash is not None:
|
|
154
166
|
root_hash = bytes32.from_hexstr(root_hash)
|
|
155
167
|
if self.service is None:
|
|
156
168
|
raise Exception("Data layer not created")
|
|
157
|
--
|
|
169
|
++
|
|
170
|
++
if page is None:
|
|
171
|
++
keys = await self.service.get_keys(store_id, root_hash)
|
|
172
|
++
else:
|
|
173
|
++
keys_paginated = await self.service.get_keys_paginated(store_id, root_hash, page, max_page_size)
|
|
174
|
++
keys = keys_paginated.keys
|
|
175
|
++
|
|
158
176
|
if keys == [] and root_hash is not None and root_hash != bytes32([0] * 32):
|
|
159
177
|
raise Exception(f"Can't find keys for {root_hash}")
|
|
160
|
--
|
|
178
|
++
|
|
179
|
++
response: EndpointResult = {"keys": [f"0x{key.hex()}" for key in keys]}
|
|
180
|
++
|
|
181
|
++
if page is not None:
|
|
182
|
++
response.update(
|
|
183
|
++
{
|
|
184
|
++
"total_pages": keys_paginated.total_pages,
|
|
185
|
++
"total_bytes": keys_paginated.total_bytes,
|
|
186
|
++
"root_hash": keys_paginated.root_hash,
|
|
187
|
++
},
|
|
188
|
++
)
|
|
189
|
++
|
|
190
|
++
return response
|
|
161
191
|
|
|
162
192
|
async def get_keys_values(self, request: Dict[str, Any]) -> EndpointResult:
|
|
163
193
|
store_id = bytes32(hexstr_to_bytes(request["id"]))
|
|
164
194
|
root_hash = request.get("root_hash")
|
|
195
|
++
page = request.get("page", None)
|
|
196
|
++
max_page_size = request.get("max_page_size", None)
|
|
165
197
|
if root_hash is not None:
|
|
166
198
|
root_hash = bytes32.from_hexstr(root_hash)
|
|
167
199
|
if self.service is None:
|
|
168
200
|
raise Exception("Data layer not created")
|
|
169
|
--
|
|
170
|
--
|
|
171
|
--
|
|
172
|
--
|
|
173
|
--
|
|
174
|
--
|
|
201
|
++
|
|
202
|
++
if page is None:
|
|
203
|
++
keys_values = await self.service.get_keys_values(store_id, root_hash)
|
|
204
|
++
else:
|
|
205
|
++
keys_values_paginated = await self.service.get_keys_values_paginated(
|
|
206
|
++
store_id, root_hash, page, max_page_size
|
|
207
|
++
)
|
|
208
|
++
keys_values = keys_values_paginated.keys_values
|
|
209
|
++
|
|
210
|
++
json_nodes = [recurse_jsonify(dataclasses.asdict(node)) for node in keys_values]
|
|
211
|
++
if not json_nodes and root_hash is not None and root_hash != bytes32([0] * 32):
|
|
175
212
|
raise Exception(f"Can't find keys and values for {root_hash}")
|
|
176
|
--
|
|
213
|
++
|
|
214
|
++
response: EndpointResult = {"keys_values": json_nodes}
|
|
215
|
++
|
|
216
|
++
if page is not None:
|
|
217
|
++
response.update(
|
|
218
|
++
{
|
|
219
|
++
"total_pages": keys_values_paginated.total_pages,
|
|
220
|
++
"total_bytes": keys_values_paginated.total_bytes,
|
|
221
|
++
"root_hash": keys_values_paginated.root_hash,
|
|
222
|
++
},
|
|
223
|
++
)
|
|
224
|
++
|
|
225
|
++
return response
|
|
177
226
|
|
|
178
227
|
async def get_ancestors(self, request: Dict[str, Any]) -> EndpointResult:
|
|
179
228
|
store_id = bytes32(hexstr_to_bytes(request["id"]))
|
|
@@@ -359,11 -359,11 +408,32 @@@
|
|
|
359
408
|
hash_1_bytes = bytes32.from_hexstr(hash_1)
|
|
360
409
|
hash_2 = request["hash_2"]
|
|
361
410
|
hash_2_bytes = bytes32.from_hexstr(hash_2)
|
|
362
|
--
|
|
411
|
++
page = request.get("page", None)
|
|
412
|
++
max_page_size = request.get("max_page_size", None)
|
|
363
413
|
res: List[Dict[str, Any]] = []
|
|
414
|
++
|
|
415
|
++
if page is None:
|
|
416
|
++
records_dict = await self.service.get_kv_diff(id_bytes, hash_1_bytes, hash_2_bytes)
|
|
417
|
++
records = list(records_dict)
|
|
418
|
++
else:
|
|
419
|
++
kv_diff_paginated = await self.service.get_kv_diff_paginated(
|
|
420
|
++
id_bytes, hash_1_bytes, hash_2_bytes, page, max_page_size
|
|
421
|
++
)
|
|
422
|
++
records = kv_diff_paginated.kv_diff
|
|
423
|
++
|
|
364
424
|
for rec in records:
|
|
365
|
--
res.
|
|
366
|
--
|
|
425
|
++
res.append({"type": rec.type.name, "key": rec.key.hex(), "value": rec.value.hex()})
|
|
426
|
++
|
|
427
|
++
response: EndpointResult = {"diff": res}
|
|
428
|
++
if page is not None:
|
|
429
|
++
response.update(
|
|
430
|
++
{
|
|
431
|
++
"total_pages": kv_diff_paginated.total_pages,
|
|
432
|
++
"total_bytes": kv_diff_paginated.total_bytes,
|
|
433
|
++
},
|
|
434
|
++
)
|
|
435
|
++
|
|
436
|
++
return response
|
|
367
437
|
|
|
368
438
|
async def add_mirror(self, request: Dict[str, Any]) -> EndpointResult:
|
|
369
439
|
store_id = request["id"]
|
|
@@@ -458,3 -458,3 +528,44 @@@
|
|
|
458
528
|
root = await self.service.data_store.clear_pending_roots(tree_id=request.store_id)
|
|
459
529
|
|
|
460
530
|
return ClearPendingRootsResponse(success=root is not None, root=root)
|
|
531
|
++
|
|
532
|
++
@streamable_marshal
|
|
533
|
++
async def get_proof(self, request: GetProofRequest) -> GetProofResponse:
|
|
534
|
++
root = await self.service.get_root(store_id=request.store_id)
|
|
535
|
++
if root is None:
|
|
536
|
++
raise ValueError("no root")
|
|
537
|
++
|
|
538
|
++
all_proofs: List[HashOnlyProof] = []
|
|
539
|
++
for key in request.keys:
|
|
540
|
++
key_value = await self.service.get_value(store_id=request.store_id, key=key)
|
|
541
|
++
pi = await self.service.data_store.get_proof_of_inclusion_by_key(tree_id=request.store_id, key=key)
|
|
542
|
++
|
|
543
|
++
proof = HashOnlyProof.from_key_value(
|
|
544
|
++
key=key,
|
|
545
|
++
value=key_value,
|
|
546
|
++
node_hash=pi.node_hash,
|
|
547
|
++
layers=[
|
|
548
|
++
ProofLayer(
|
|
549
|
++
other_hash_side=uint8(layer.other_hash_side),
|
|
550
|
++
other_hash=layer.other_hash,
|
|
551
|
++
combined_hash=layer.combined_hash,
|
|
552
|
++
)
|
|
553
|
++
for layer in pi.layers
|
|
554
|
++
],
|
|
555
|
++
)
|
|
556
|
++
all_proofs.append(proof)
|
|
557
|
++
|
|
558
|
++
store_proof = StoreProofsHashes(store_id=request.store_id, proofs=all_proofs)
|
|
559
|
++
return GetProofResponse(
|
|
560
|
++
proof=DLProof(
|
|
561
|
++
store_proofs=store_proof,
|
|
562
|
++
coin_id=root.coin_id,
|
|
563
|
++
inner_puzzle_hash=root.inner_puzzle_hash,
|
|
564
|
++
),
|
|
565
|
++
success=True,
|
|
566
|
++
)
|
|
567
|
++
|
|
568
|
++
@streamable_marshal
|
|
569
|
++
async def verify_proof(self, request: DLProof) -> VerifyProofResponse:
|
|
570
|
++
response = await self.service.wallet_rpc.dl_verify_proof(request)
|
|
571
|
++
return response
|
|
@@@ -32,17 -32,17 +32,29 @@@ class DataLayerRpcClient(RpcClient)
|
|
|
32
32
|
response = await self.fetch("batch_update", {"id": store_id.hex(), "changelist": changelist, "fee": fee})
|
|
33
33
|
return response
|
|
34
34
|
|
|
35
|
--
async def get_keys_values(
|
|
35
|
++
async def get_keys_values(
|
|
36
|
++
self, store_id: bytes32, root_hash: Optional[bytes32], page: Optional[int], max_page_size: Optional[int]
|
|
37
|
++
) -> Dict[str, Any]:
|
|
36
38
|
request: Dict[str, Any] = {"id": store_id.hex()}
|
|
37
39
|
if root_hash is not None:
|
|
38
40
|
request["root_hash"] = root_hash.hex()
|
|
41
|
++
if page is not None:
|
|
42
|
++
request["page"] = page
|
|
43
|
++
if max_page_size is not None:
|
|
44
|
++
request["max_page_size"] = max_page_size
|
|
39
45
|
response = await self.fetch("get_keys_values", request)
|
|
40
46
|
return response
|
|
41
47
|
|
|
42
|
--
async def get_keys(
|
|
48
|
++
async def get_keys(
|
|
49
|
++
self, store_id: bytes32, root_hash: Optional[bytes32], page: Optional[int], max_page_size: Optional[int]
|
|
50
|
++
) -> Dict[str, Any]:
|
|
43
51
|
request: Dict[str, Any] = {"id": store_id.hex()}
|
|
44
52
|
if root_hash is not None:
|
|
45
53
|
request["root_hash"] = root_hash.hex()
|
|
54
|
++
if page is not None:
|
|
55
|
++
request["page"] = page
|
|
56
|
++
if max_page_size is not None:
|
|
57
|
++
request["max_page_size"] = max_page_size
|
|
46
58
|
response = await self.fetch("get_keys", request)
|
|
47
59
|
return response
|
|
48
60
|
|
|
@@@ -87,10 -87,10 +99,15 @@@
|
|
|
87
99
|
response = await self.fetch("add_missing_files", request)
|
|
88
100
|
return response
|
|
89
101
|
|
|
90
|
--
async def get_kv_diff(
|
|
91
|
--
|
|
92
|
--
|
|
93
|
--
)
|
|
102
|
++
async def get_kv_diff(
|
|
103
|
++
self, store_id: bytes32, hash_1: bytes32, hash_2: bytes32, page: Optional[int], max_page_size: Optional[int]
|
|
104
|
++
) -> Dict[str, Any]:
|
|
105
|
++
request: Dict[str, Any] = {"id": store_id.hex(), "hash_1": hash_1.hex(), "hash_2": hash_2.hex()}
|
|
106
|
++
if page is not None:
|
|
107
|
++
request["page"] = page
|
|
108
|
++
if max_page_size is not None:
|
|
109
|
++
request["max_page_size"] = max_page_size
|
|
110
|
++
response = await self.fetch("get_kv_diff", request)
|
|
94
111
|
return response
|
|
95
112
|
|
|
96
113
|
async def get_root_history(self, store_id: bytes32) -> Dict[str, Any]:
|
|
@@@ -131,3 -131,3 +148,12 @@@
|
|
|
131
148
|
request = ClearPendingRootsRequest(store_id=store_id)
|
|
132
149
|
response = await self.fetch("clear_pending_roots", request.marshal())
|
|
133
150
|
return response
|
|
151
|
++
|
|
152
|
++
async def get_proof(self, store_id: bytes32, keys: List[bytes]) -> Dict[str, Any]:
|
|
153
|
++
request: Dict[str, Any] = {"store_id": store_id.hex(), "keys": [key.hex() for key in keys]}
|
|
154
|
++
response = await self.fetch("get_proof", request)
|
|
155
|
++
return response
|
|
156
|
++
|
|
157
|
++
async def verify_proof(self, proof: Dict[str, Any]) -> Dict[str, Any]:
|
|
158
|
++
response = await self.fetch("verify_proof", proof)
|
|
159
|
++
return response
|
|
@@@ -31,7 -31,7 +31,7 @@@ class RpcClient
|
|
|
31
31
|
|
|
32
32
|
url: str
|
|
33
33
|
session: aiohttp.ClientSession
|
|
34
|
--
ssl_context: SSLContext
|
|
34
|
++
ssl_context: Optional[SSLContext]
|
|
35
35
|
hostname: str
|
|
36
36
|
port: uint16
|
|
37
37
|
closing_task: Optional[asyncio.Task] = None
|
|
@@@ -41,19 -41,19 +41,35 @@@
|
|
|
41
41
|
cls: Type[_T_RpcClient],
|
|
42
42
|
self_hostname: str,
|
|
43
43
|
port: uint16,
|
|
44
|
--
root_path: Path,
|
|
45
|
--
net_config: Dict[str, Any],
|
|
44
|
++
root_path: Optional[Path],
|
|
45
|
++
net_config: Optional[Dict[str, Any]],
|
|
46
46
|
) -> _T_RpcClient:
|
|
47
|
--
|
|
48
|
--
|
|
49
|
--
|
|
50
|
--
|
|
47
|
++
if (root_path is not None) != (net_config is not None):
|
|
48
|
++
raise ValueError("Either both or neither of root_path and net_config must be provided")
|
|
49
|
++
|
|
50
|
++
ssl_context: Optional[SSLContext]
|
|
51
|
++
if root_path is None:
|
|
52
|
++
scheme = "http"
|
|
53
|
++
ssl_context = None
|
|
54
|
++
else:
|
|
55
|
++
assert root_path is not None
|
|
56
|
++
assert net_config is not None
|
|
57
|
++
scheme = "https"
|
|
58
|
++
ca_crt_path, ca_key_path = private_ssl_ca_paths(root_path, net_config)
|
|
59
|
++
crt_path = root_path / net_config["daemon_ssl"]["private_crt"]
|
|
60
|
++
key_path = root_path / net_config["daemon_ssl"]["private_key"]
|
|
61
|
++
ssl_context = ssl_context_for_client(ca_crt_path, ca_key_path, crt_path, key_path)
|
|
62
|
++
|
|
63
|
++
timeout = 300
|
|
64
|
++
if net_config is not None:
|
|
65
|
++
timeout = net_config.get("rpc_timeout", timeout)
|
|
66
|
++
|
|
51
67
|
self = cls(
|
|
52
68
|
hostname=self_hostname,
|
|
53
69
|
port=port,
|
|
54
|
--
url=f"
|
|
70
|
++
url=f"{scheme}://{self_hostname}:{str(port)}/",
|
|
55
71
|
session=aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)),
|
|
56
|
--
ssl_context=
|
|
72
|
++
ssl_context=ssl_context,
|
|
57
73
|
)
|
|
58
74
|
|
|
59
75
|
return self
|
|
@@@ -64,8 -64,8 +80,8 @@@
|
|
|
64
80
|
cls: Type[_T_RpcClient],
|
|
65
81
|
self_hostname: str,
|
|
66
82
|
port: uint16,
|
|
67
|
--
root_path: Path,
|
|
68
|
--
net_config: Dict[str, Any],
|
|
83
|
++
root_path: Optional[Path] = None,
|
|
84
|
++
net_config: Optional[Dict[str, Any]] = None,
|
|
69
85
|
) -> AsyncIterator[_T_RpcClient]:
|
|
70
86
|
self = await cls.create(
|
|
71
87
|
self_hostname=self_hostname,
|
|
@@@ -11,8 -11,7 +11,9 @@@ from chia_rs import AugSchemeMPL, G1Ele
|
|
|
11
11
|
from clvm_tools.binutils import assemble
|
|
12
12
|
|
|
13
13
|
from chia.consensus.block_rewards import calculate_base_farmer_reward
|
|
14
|
+
from chia.consensus.default_constants import DEFAULT_CONSTANTS
|
|
14
15
|
from chia.data_layer.data_layer_errors import LauncherCoinNotFoundError
|
|
16
|
++
from chia.data_layer.data_layer_util import dl_verify_proof
|
|
15
17
|
from chia.data_layer.data_layer_wallet import DataLayerWallet
|
|
16
18
|
from chia.pools.pool_wallet import PoolWallet
|
|
17
19
|
from chia.pools.pool_wallet_info import FARMING_TO_POOL, PoolState, PoolWalletInfo, create_pool_state
|
|
@@@ -285,6 -271,6 +286,7 @@@ class WalletRpcApi
|
|
|
285
286
|
"/dl_get_mirrors": self.dl_get_mirrors,
|
|
286
287
|
"/dl_new_mirror": self.dl_new_mirror,
|
|
287
288
|
"/dl_delete_mirror": self.dl_delete_mirror,
|
|
289
|
++
"/dl_verify_proof": self.dl_verify_proof,
|
|
288
290
|
# Verified Credential
|
|
289
291
|
"/vc_mint": self.vc_mint,
|
|
290
292
|
"/vc_get": self.vc_get,
|
|
@@@ -499,14 -479,14 +501,31 @@@
|
|
|
499
501
|
return False, False
|
|
500
502
|
|
|
501
503
|
config: Dict[str, Any] = load_config(new_root, "config.yaml")
|
|
502
|
--
farmer_target = config["farmer"].get("xch_target_address")
|
|
503
|
--
pool_target = config["pool"].get("xch_target_address")
|
|
504
|
--
address_to_check: List[bytes32] = [
|
|
504
|
++
farmer_target = config["farmer"].get("xch_target_address", "")
|
|
505
|
++
pool_target = config["pool"].get("xch_target_address", "")
|
|
506
|
++
address_to_check: List[bytes32] = []
|
|
507
|
++
|
|
508
|
++
try:
|
|
509
|
++
farmer_decoded = decode_puzzle_hash(farmer_target)
|
|
510
|
++
address_to_check.append(farmer_decoded)
|
|
511
|
++
except ValueError:
|
|
512
|
++
farmer_decoded = None
|
|
513
|
++
|
|
514
|
++
try:
|
|
515
|
++
pool_decoded = decode_puzzle_hash(pool_target)
|
|
516
|
++
address_to_check.append(pool_decoded)
|
|
517
|
++
except ValueError:
|
|
518
|
++
pool_decoded = None
|
|
505
519
|
|
|
506
520
|
found_addresses: Set[bytes32] = match_address_to_sk(sk, address_to_check, max_ph_to_search)
|
|
521
|
++
found_farmer = False
|
|
522
|
++
found_pool = False
|
|
523
|
++
|
|
524
|
++
if farmer_decoded is not None:
|
|
525
|
++
found_farmer = farmer_decoded in found_addresses
|
|
507
526
|
|
|
508
|
--
|
|
509
|
--
|
|
527
|
++
if pool_decoded is not None:
|
|
528
|
++
found_pool = pool_decoded in found_addresses
|
|
510
529
|
|
|
511
530
|
return found_farmer, found_pool
|
|
512
531
|
|
|
@@@ -1929,9 -1920,9 +1948,7 @@@
|
|
|
1929
1948
|
"also": {
|
|
1930
1949
|
**info["also"],
|
|
1931
1950
|
"flags": ProofsChecker.from_program(
|
|
1932
|
--
uncurry_puzzle(
|
|
1933
|
--
Program(assemble(info["also"]["proofs_checker"])) # type: ignore[no-untyped-call]
|
|
1934
|
--
)
|
|
1951
|
++
uncurry_puzzle(Program(assemble(info["also"]["proofs_checker"])))
|
|
1935
1952
|
).flags,
|
|
1936
1953
|
},
|
|
1937
1954
|
}
|
|
@@@ -4328,6 -4386,6 +4345,19 @@@
|
|
|
4328
4345
|
"transactions": [tx.to_json_dict_convenience(self.service.config) for tx in txs],
|
|
4329
4346
|
}
|
|
4330
4347
|
|
|
4348
|
++
async def dl_verify_proof(
|
|
4349
|
++
self,
|
|
4350
|
++
request: Dict[str, Any],
|
|
4351
|
++
) -> EndpointResult:
|
|
4352
|
++
"""Verify a proof of inclusion for a DL singleton"""
|
|
4353
|
++
res = await dl_verify_proof(
|
|
4354
|
++
request,
|
|
4355
|
++
peer=self.service.get_full_node_peer(),
|
|
4356
|
++
wallet_node=self.service.wallet_state_manager.wallet_node,
|
|
4357
|
++
)
|
|
4358
|
++
|
|
4359
|
++
return res
|
|
4360
|
++
|
|
4331
4361
|
##########################################################################################
|
|
4332
4362
|
# Verified Credential
|
|
4333
4363
|
##########################################################################################
|
|
@@@ -1,7 -1,7 +1,8 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
from typing import Any, Dict, List, Optional, Tuple, Union
|
|
3
|
+
from typing import Any, Dict, List, Optional, Tuple, Union, cast
|
|
4
4
|
|
|
5
|
++
from chia.data_layer.data_layer_util import DLProof, VerifyProofResponse
|
|
5
6
|
from chia.data_layer.data_layer_wallet import Mirror, SingletonRecord
|
|
6
7
|
from chia.pools.pool_wallet_info import PoolWalletInfo
|
|
7
8
|
from chia.rpc.rpc_client import RpcClient
|
|
@@@ -1245,11 -1292,30 +1246,15 @@@ class WalletRpcClient(RpcClient)
|
|
|
1245
1246
|
)
|
|
1246
1247
|
return [TransactionRecord.from_json_dict_convenience(tx) for tx in response["transactions"]]
|
|
1247
1248
|
|
|
1248
|
-
async def
|
|
1249
|
-
|
|
1250
|
-
|
|
1251
|
-
|
|
1252
|
-
|
|
1253
|
-
|
|
1254
|
-
if pagination is not None:
|
|
1255
|
-
if pagination[0] is not None:
|
|
1256
|
-
request["start"] = pagination[0]
|
|
1257
|
-
if pagination[1] is not None:
|
|
1258
|
-
request["end"] = pagination[1]
|
|
1259
|
-
response = await self.fetch("get_notifications", request)
|
|
1260
|
-
return [
|
|
1261
|
-
Notification(
|
|
1262
|
-
bytes32.from_hexstr(notification["id"]),
|
|
1263
|
-
bytes.fromhex(notification["message"]),
|
|
1264
|
-
uint64(notification["amount"]),
|
|
1265
|
-
uint32(notification["height"]),
|
|
1266
|
-
)
|
|
1267
|
-
for notification in response["notifications"]
|
|
1268
|
-
]
|
|
1249
|
++
async def dl_verify_proof(self, request: DLProof) -> VerifyProofResponse:
|
|
1250
|
++
response = await self.fetch(path="dl_verify_proof", request_json=request.to_json_dict())
|
|
1251
|
++
return VerifyProofResponse.from_json_dict(response)
|
|
1252
|
++
|
|
1253
|
+
async def get_notifications(self, request: GetNotifications) -> GetNotificationsResponse:
|
|
1254
|
+
return GetNotificationsResponse.from_json_dict(await self.fetch("get_notifications", request.to_json_dict()))
|
|
1269
1255
|
|
|
1270
1256
|
async def delete_notifications(self, ids: Optional[List[bytes32]] = None) -> bool:
|
|
1271
|
-
request
|
|
1257
|
+
request = {}
|
|
1272
1258
|
if ids is not None:
|
|
1273
1259
|
request["ids"] = [id.hex() for id in ids]
|
|
1274
1260
|
response = await self.fetch("delete_notifications", request)
|
|
@@@ -2057,10 -2427,8 +2057,10 @@@ def create_block_tools
|
|
|
2057
2057
|
return bt
|
|
2058
2058
|
|
|
2059
2059
|
|
|
2060
|
-
def make_unfinished_block(
|
|
2061
|
-
|
|
2060
|
+
def make_unfinished_block(
|
|
2061
|
+
block: FullBlock, constants: ConsensusConstants, *, force_overflow: bool = False
|
|
2062
|
+
) -> UnfinishedBlock:
|
|
2062
|
-
if force_overflow or is_overflow_block(constants,
|
|
2063
|
++
if force_overflow or is_overflow_block(constants, block.reward_chain_block.signage_point_index):
|
|
2063
2064
|
finished_ss = block.finished_sub_slots[:-1]
|
|
2064
2065
|
else:
|
|
2065
2066
|
finished_ss = block.finished_sub_slots
|
|
@@@ -1,7 -1,7 +1,7 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
import io
|
|
4
|
-
from typing import Any, Callable, Dict, Optional, Set, Tuple
|
|
5
|
-
from typing import Any, Callable, Dict, Set, Tuple
|
|
4
|
++
from typing import Any, Callable, Dict, Optional, Set, Tuple, Type
|
|
6
5
|
|
|
7
6
|
from chia_rs import ALLOW_BACKREFS, run_chia_program, tree_hash
|
|
8
7
|
from clvm import SExp
|
|
@@@ -48,6 -48,6 +48,16 @@@ class Program(SExp)
|
|
|
48
48
|
def fromhex(cls, hexstr: str) -> Program:
|
|
49
49
|
return cls.from_bytes(hexstr_to_bytes(hexstr))
|
|
50
50
|
|
|
51
|
++
@classmethod
|
|
52
|
++
def from_json_dict(cls: Type[Program], json_dict: Any) -> Program:
|
|
53
|
++
if isinstance(json_dict, cls):
|
|
54
|
++
return json_dict
|
|
55
|
++
item = hexstr_to_bytes(json_dict)
|
|
56
|
++
return cls.from_bytes(item)
|
|
57
|
++
|
|
58
|
++
def to_json_dict(self) -> str:
|
|
59
|
++
return f"0x{self}"
|
|
60
|
++
|
|
51
61
|
def __bytes__(self) -> bytes:
|
|
52
62
|
f = io.BytesIO()
|
|
53
63
|
self.stream(f) # noqa
|
|
@@@ -43,114 -27,9 +43,114 @@@ class DedupCoinSpend
|
|
|
43
43
|
cost: Optional[uint64]
|
|
44
44
|
|
|
45
45
|
|
|
46
|
+
@dataclasses.dataclass(frozen=True)
|
|
47
|
+
class UnspentLineageInfo:
|
|
48
|
+
coin_id: bytes32
|
|
46
|
-
coin_amount:
|
|
49
|
++
coin_amount: uint64
|
|
50
|
+
parent_id: bytes32
|
|
47
|
-
parent_amount:
|
|
51
|
++
parent_amount: uint64
|
|
52
|
+
parent_parent_id: bytes32
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def set_next_singleton_version(
|
|
56
|
+
current_singleton: Coin, singleton_additions: List[Coin], fast_forward_spends: Dict[bytes32, UnspentLineageInfo]
|
|
57
|
+
) -> None:
|
|
58
|
+
"""
|
|
59
|
+
Finds the next version of the singleton among its additions and updates the
|
|
60
|
+
fast forward spends, currently chained together, accordingly
|
|
61
|
+
|
|
62
|
+
Args:
|
|
63
|
+
current_singleton: the current iteration of the singleton
|
|
64
|
+
singleton_additions: the additions of the current singleton
|
|
65
|
+
fast_forward_spends: in-out parameter of the spends currently chained together
|
|
66
|
+
|
|
67
|
+
Raises:
|
|
68
|
+
ValueError if none of the additions are considered to be the singleton's
|
|
69
|
+
next iteration
|
|
70
|
+
"""
|
|
71
|
+
singleton_child = next(
|
|
72
|
+
(addition for addition in singleton_additions if addition.puzzle_hash == current_singleton.puzzle_hash), None
|
|
73
|
+
)
|
|
74
|
+
if singleton_child is None:
|
|
75
|
+
raise ValueError("Could not find fast forward child singleton.")
|
|
76
|
+
# Keep track of this in order to chain the next ff
|
|
77
|
+
fast_forward_spends[current_singleton.puzzle_hash] = UnspentLineageInfo(
|
|
78
|
+
coin_id=singleton_child.name(),
|
|
79
|
+
coin_amount=singleton_child.amount,
|
|
80
|
+
parent_id=singleton_child.parent_coin_info,
|
|
81
|
+
parent_amount=current_singleton.amount,
|
|
82
|
+
parent_parent_id=current_singleton.parent_coin_info,
|
|
83
|
+
)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def perform_the_fast_forward(
|
|
87
|
+
unspent_lineage_info: UnspentLineageInfo,
|
|
88
|
+
spend_data: BundleCoinSpend,
|
|
89
|
+
fast_forward_spends: Dict[bytes32, UnspentLineageInfo],
|
|
90
|
+
) -> Tuple[CoinSpend, List[Coin]]:
|
|
91
|
+
"""
|
|
92
|
+
Performs a singleton fast forward, including the updating of all previous
|
|
93
|
+
additions to point to the most recent version, and updates the fast forward
|
|
94
|
+
spends, currently chained together, accordingly
|
|
95
|
+
|
|
96
|
+
Args:
|
|
97
|
+
unspent_lineage_info: the singleton's most recent lineage information
|
|
98
|
+
spend_data: the current spend's data
|
|
99
|
+
fast_forward_spends: in-out parameter of the spends currently chained together
|
|
100
|
+
|
|
101
|
+
Returns:
|
|
102
|
+
CoinSpend: the new coin spend after performing the fast forward
|
|
103
|
+
List[Coin]: the updated additions that point to the new coin to spend
|
|
104
|
+
|
|
105
|
+
Raises:
|
|
106
|
+
ValueError if none of the additions are considered to be the singleton's
|
|
107
|
+
next iteration
|
|
108
|
+
"""
|
|
109
|
+
new_coin = Coin(
|
|
110
|
+
unspent_lineage_info.parent_id, spend_data.coin_spend.coin.puzzle_hash, unspent_lineage_info.coin_amount
|
|
111
|
+
)
|
|
112
|
+
new_parent = Coin(
|
|
113
|
+
unspent_lineage_info.parent_parent_id,
|
|
114
|
+
spend_data.coin_spend.coin.puzzle_hash,
|
|
115
|
+
unspent_lineage_info.parent_amount,
|
|
116
|
+
)
|
|
117
|
+
# These hold because puzzle hash is not expected to change
|
|
118
|
+
assert new_coin.name() == unspent_lineage_info.coin_id
|
|
119
|
+
assert new_parent.name() == unspent_lineage_info.parent_id
|
|
120
|
+
rust_coin_spend = RustCoinSpend(
|
|
121
|
+
coin=spend_data.coin_spend.coin,
|
|
122
|
+
puzzle_reveal=RustProgram.from_bytes(bytes(spend_data.coin_spend.puzzle_reveal)),
|
|
123
|
+
solution=RustProgram.from_bytes(bytes(spend_data.coin_spend.solution)),
|
|
124
|
+
)
|
|
125
|
+
new_solution = SerializedProgram.from_bytes(
|
|
126
|
+
fast_forward_singleton(spend=rust_coin_spend, new_coin=new_coin, new_parent=new_parent)
|
|
127
|
+
)
|
|
128
|
+
singleton_child = None
|
|
129
|
+
patched_additions = []
|
|
130
|
+
for addition in spend_data.additions:
|
|
131
|
+
patched_addition = Coin(unspent_lineage_info.coin_id, addition.puzzle_hash, addition.amount)
|
|
132
|
+
patched_additions.append(patched_addition)
|
|
133
|
+
if addition.puzzle_hash == spend_data.coin_spend.coin.puzzle_hash:
|
|
134
|
+
# We found the next version of this singleton
|
|
135
|
+
singleton_child = patched_addition
|
|
136
|
+
if singleton_child is None:
|
|
137
|
+
raise ValueError("Could not find fast forward child singleton.")
|
|
138
|
+
new_coin_spend = CoinSpend(new_coin, spend_data.coin_spend.puzzle_reveal, new_solution)
|
|
139
|
+
# Keep track of this in order to chain the next ff
|
|
140
|
+
fast_forward_spends[spend_data.coin_spend.coin.puzzle_hash] = UnspentLineageInfo(
|
|
141
|
+
coin_id=singleton_child.name(),
|
|
142
|
+
coin_amount=singleton_child.amount,
|
|
143
|
+
parent_id=singleton_child.parent_coin_info,
|
|
144
|
+
parent_amount=unspent_lineage_info.coin_amount,
|
|
145
|
+
parent_parent_id=unspent_lineage_info.parent_id,
|
|
146
|
+
)
|
|
147
|
+
return new_coin_spend, patched_additions
|
|
148
|
+
|
|
149
|
+
|
|
48
150
|
@dataclasses.dataclass(frozen=True)
|
|
49
151
|
class EligibleCoinSpends:
|
|
50
|
-
|
|
152
|
+
deduplication_spends: Dict[bytes32, DedupCoinSpend] = dataclasses.field(default_factory=dict)
|
|
153
|
+
fast_forward_spends: Dict[bytes32, UnspentLineageInfo] = dataclasses.field(default_factory=dict)
|
|
51
154
|
|
|
52
155
|
def get_deduplication_info(
|
|
53
156
|
self, *, bundle_coin_spends: Dict[bytes32, BundleCoinSpend], max_cost: int
|
|
@@@ -1,5 -1,5 +1,7 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
--
|
|
3
|
++
import chia_rs
|
|
4
4
|
|
|
5
|
--
|
|
5
|
++
ELIGIBLE_FOR_DEDUP = chia_rs.ELIGIBLE_FOR_DEDUP
|
|
6
|
++
Spend = chia_rs.Spend
|
|
7
|
++
SpendBundleConditions = chia_rs.SpendBundleConditions
|
|
@@@ -80,7 -80,7 +80,7 @@@ def make_aggsig_final_message
|
|
|
80
80
|
if isinstance(spend, Coin):
|
|
81
81
|
coin = spend
|
|
82
82
|
elif isinstance(spend, Spend):
|
|
83
|
--
coin = Coin(spend.parent_id, spend.puzzle_hash, spend.coin_amount)
|
|
83
|
++
coin = Coin(spend.parent_id, spend.puzzle_hash, uint64(spend.coin_amount))
|
|
84
84
|
else:
|
|
85
85
|
raise ValueError(f"Expected Coin or Spend, got {type(spend)}") # pragma: no cover
|
|
86
86
|
|
|
@@@ -51,7 -48,7 +51,7 @@@ def additions_for_npc(npc_result: NPCRe
|
|
|
51
51
|
return []
|
|
52
52
|
for spend in npc_result.conds.spends:
|
|
53
53
|
for puzzle_hash, amount, _ in spend.create_coin:
|
|
54
|
--
coin = Coin(spend.coin_id, puzzle_hash, amount)
|
|
54
|
++
coin = Coin(spend.coin_id, puzzle_hash, uint64(amount))
|
|
55
55
|
additions.append(coin)
|
|
56
56
|
|
|
57
57
|
return additions
|
|
@@@ -687,8 -650,8 +687,6 @@@ data_layer
|
|
|
687
687
|
|
|
688
688
|
logging: *logging
|
|
689
689
|
|
|
690
|
--
# TODO: which of these are really appropriate?
|
|
691
|
--
|
|
692
690
|
ssl:
|
|
693
691
|
private_crt: "config/ssl/data_layer/private_data_layer.crt"
|
|
694
692
|
private_key: "config/ssl/data_layer/private_data_layer.key"
|
|
@@@ -8,6 -7,6 +8,7 @@@ import o
|
|
|
8
8
|
import signal
|
|
9
9
|
import sys
|
|
10
10
|
from dataclasses import dataclass
|
|
11
|
++
from inspect import getframeinfo, stack
|
|
11
12
|
from pathlib import Path
|
|
12
13
|
from types import FrameType
|
|
13
14
|
from typing import (
|
|
@@@ -19,16 -17,15 +20,21 @@@
|
|
|
19
20
|
ContextManager,
|
|
20
21
|
Dict,
|
|
21
22
|
Generic,
|
|
23
|
++
Iterable,
|
|
22
24
|
Iterator,
|
|
23
25
|
List,
|
|
24
26
|
Optional,
|
|
25
27
|
Sequence,
|
|
28
|
++
Tuple,
|
|
29
|
++
Type,
|
|
26
30
|
TypeVar,
|
|
27
31
|
Union,
|
|
28
32
|
final,
|
|
33
|
++
get_args,
|
|
34
|
++
get_origin,
|
|
29
35
|
)
|
|
30
36
|
|
|
37
|
+
import psutil
|
|
31
38
|
from typing_extensions import Protocol
|
|
32
39
|
|
|
33
40
|
from chia.util.errors import InvalidPathError
|
|
@@@ -388,36 -374,3 +394,83 @@@ async def split_async_manager(manager:
|
|
|
388
394
|
yield split
|
|
389
395
|
finally:
|
|
390
396
|
await split.exit(if_needed=True)
|
|
397
|
+
|
|
398
|
+
|
|
399
|
+
class ValuedEventSentinel:
|
|
400
|
+
pass
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
@dataclasses.dataclass
|
|
404
|
+
class ValuedEvent(Generic[T]):
|
|
405
|
+
_value_sentinel: ClassVar[ValuedEventSentinel] = ValuedEventSentinel()
|
|
406
|
+
|
|
407
|
+
_event: asyncio.Event = dataclasses.field(default_factory=asyncio.Event)
|
|
408
|
+
_value: Union[ValuedEventSentinel, T] = _value_sentinel
|
|
409
|
+
|
|
410
|
+
def set(self, value: T) -> None:
|
|
411
|
+
if not isinstance(self._value, ValuedEventSentinel):
|
|
412
|
+
raise Exception("Value already set")
|
|
413
|
+
self._value = value
|
|
414
|
+
self._event.set()
|
|
415
|
+
|
|
416
|
+
async def wait(self) -> T:
|
|
417
|
+
await self._event.wait()
|
|
418
|
+
if isinstance(self._value, ValuedEventSentinel):
|
|
419
|
+
raise Exception("Value not set despite event being set")
|
|
420
|
+
return self._value
|
|
421
|
+
|
|
422
|
+
|
|
423
|
+
def available_logical_cores() -> int:
|
|
424
|
+
if sys.platform == "darwin":
|
|
425
|
+
count = os.cpu_count()
|
|
426
|
+
assert count is not None
|
|
427
|
+
return count
|
|
428
|
+
|
|
429
|
+
return len(psutil.Process().cpu_affinity())
|
|
430
|
++
|
|
431
|
++
|
|
432
|
++
def caller_file_and_line(distance: int = 1, relative_to: Iterable[Path] = ()) -> Tuple[str, int]:
|
|
433
|
++
caller = getframeinfo(stack()[distance + 1][0])
|
|
434
|
++
|
|
435
|
++
caller_path = Path(caller.filename)
|
|
436
|
++
options: List[str] = [caller_path.as_posix()]
|
|
437
|
++
for path in relative_to:
|
|
438
|
++
try:
|
|
439
|
++
options.append(caller_path.relative_to(path).as_posix())
|
|
440
|
++
except ValueError:
|
|
441
|
++
pass
|
|
442
|
++
|
|
443
|
++
return min(options, key=len), caller.lineno
|
|
444
|
++
|
|
445
|
++
|
|
446
|
++
def satisfies_hint(obj: T, type_hint: Type[T]) -> bool:
|
|
447
|
++
"""
|
|
448
|
++
Check if an object satisfies a type hint.
|
|
449
|
++
This is a simplified version of `isinstance` that also handles generic types.
|
|
450
|
++
"""
|
|
451
|
++
# Start from the initial type hint
|
|
452
|
++
object_hint_pairs = [(obj, type_hint)]
|
|
453
|
++
while len(object_hint_pairs) > 0:
|
|
454
|
++
obj, type_hint = object_hint_pairs.pop()
|
|
455
|
++
origin = get_origin(type_hint)
|
|
456
|
++
args = get_args(type_hint)
|
|
457
|
++
if origin:
|
|
458
|
++
# Handle generic types
|
|
459
|
++
if not isinstance(obj, origin):
|
|
460
|
++
return False
|
|
461
|
++
if len(args) > 0:
|
|
462
|
++
# Tuple[T, ...] gets handled just like List[T]
|
|
463
|
++
if origin is list or (origin is tuple and args[-1] is Ellipsis):
|
|
464
|
++
object_hint_pairs.extend((item, args[0]) for item in obj)
|
|
465
|
++
elif origin is tuple:
|
|
466
|
++
object_hint_pairs.extend((item, arg) for item, arg in zip(obj, args))
|
|
467
|
++
elif origin is dict:
|
|
468
|
++
object_hint_pairs.extend((k, args[0]) for k in obj.keys())
|
|
469
|
++
object_hint_pairs.extend((v, args[1]) for v in obj.values())
|
|
470
|
++
else:
|
|
471
|
++
raise NotImplementedError(f"Type {origin} is not yet supported")
|
|
472
|
++
else:
|
|
473
|
++
# Handle concrete types
|
|
474
|
++
if type(obj) is not type_hint:
|
|
475
|
++
return False
|
|
476
|
++
return True
|
|
@@@ -81,11 -81,11 +81,6 @@@ class ConversionError(StreamableError)
|
|
|
81
81
|
)
|
|
82
82
|
|
|
83
83
|
|
|
84
|
--
unhashable_types = [
|
|
85
|
--
"Program",
|
|
86
|
--
"SerializedProgram",
|
|
87
|
--
]
|
|
88
|
--
|
|
89
84
|
_T_Streamable = TypeVar("_T_Streamable", bound="Streamable")
|
|
90
85
|
|
|
91
86
|
ParseFunctionType = Callable[[BinaryIO], object]
|
|
@@@ -183,20 -183,20 +178,6 @@@ def convert_byte_type(f_type: Type[Any]
|
|
|
183
178
|
raise ConversionError(item, f_type, e) from e
|
|
184
179
|
|
|
185
180
|
|
|
186
|
--
def convert_unhashable_type(f_type: Type[Any], item: Any) -> Any:
|
|
187
|
--
if isinstance(item, f_type):
|
|
188
|
--
return item
|
|
189
|
--
if not isinstance(item, bytes):
|
|
190
|
--
item = convert_hex_string(item)
|
|
191
|
--
try:
|
|
192
|
--
if hasattr(f_type, "from_bytes_unchecked"):
|
|
193
|
--
return f_type.from_bytes_unchecked(item)
|
|
194
|
--
else:
|
|
195
|
--
return f_type.from_bytes(item)
|
|
196
|
--
except Exception as e:
|
|
197
|
--
raise ConversionError(item, f_type, e) from e
|
|
198
|
--
|
|
199
|
--
|
|
200
181
|
def convert_primitive(f_type: Type[Any], item: Any) -> Any:
|
|
201
182
|
if isinstance(item, f_type):
|
|
202
183
|
return item
|
|
@@@ -242,9 -242,9 +223,6 @@@ def function_to_convert_one_item(f_type
|
|
|
242
223
|
convert_inner_func = function_to_convert_one_item(inner_type)
|
|
243
224
|
# Ignoring for now as the proper solution isn't obvious
|
|
244
225
|
return lambda items: convert_list(convert_inner_func, items) # type: ignore[arg-type]
|
|
245
|
--
elif f_type.__name__ in unhashable_types:
|
|
246
|
--
# Type is unhashable (bls type), so cast from hex string
|
|
247
|
--
return lambda item: convert_unhashable_type(f_type, item)
|
|
248
226
|
elif hasattr(f_type, "from_json_dict"):
|
|
249
227
|
return lambda item: f_type.from_json_dict(item)
|
|
250
228
|
elif issubclass(f_type, bytes):
|
|
@@@ -292,13 -292,10 +270,12 @@@ def function_to_post_init_process_one_i
|
|
|
292
270
|
|
|
293
271
|
def recurse_jsonify(d: Any) -> Any:
|
|
294
272
|
"""
|
|
295
|
--
Makes bytes objects
|
|
296
|
--
strings.
|
|
273
|
++
Makes bytes objects into strings with 0x, and makes large ints into strings.
|
|
297
274
|
"""
|
|
298
|
-
if
|
|
275
|
+
if hasattr(d, "override_json_serialization"):
|
|
276
|
+
overrid_ret: Union[List[Any], Dict[str, Any], str, None, int] = d.override_json_serialization(recurse_jsonify)
|
|
277
|
+
return overrid_ret
|
|
278
|
+
elif dataclasses.is_dataclass(d):
|
|
299
279
|
new_dict = {}
|
|
300
280
|
for field in dataclasses.fields(d):
|
|
301
281
|
new_dict[field.name] = recurse_jsonify(getattr(d, field.name))
|
|
@@@ -316,7 -313,7 +293,7 @@@
|
|
|
316
293
|
new_dict[name] = recurse_jsonify(val)
|
|
317
294
|
return new_dict
|
|
318
295
|
|
|
319
|
--
elif
|
|
296
|
++
elif issubclass(type(d), bytes):
|
|
320
297
|
return f"0x{bytes(d).hex()}"
|
|
321
298
|
elif isinstance(d, Enum):
|
|
322
299
|
return d.name
|
|
@@@ -291,7 -292,7 +291,7 @@@ class DAOCATWallet
|
|
|
291
291
|
primaries = [
|
|
292
292
|
Payment(
|
|
293
293
|
new_innerpuzzle.get_tree_hash(),
|
|
294
|
--
|
|
294
|
++
vote_amount,
|
|
295
295
|
[standard_inner_puz.get_tree_hash()],
|
|
296
296
|
)
|
|
297
297
|
]
|
|
@@@ -301,12 -302,12 +301,12 @@@
|
|
|
301
301
|
conditions=(CreatePuzzleAnnouncement(message),),
|
|
302
302
|
)
|
|
303
303
|
else:
|
|
304
|
--
vote_amount = amount - running_sum
|
|
304
|
++
vote_amount = uint64(amount - running_sum)
|
|
305
305
|
running_sum = running_sum + coin.amount
|
|
306
306
|
primaries = [
|
|
307
307
|
Payment(
|
|
308
308
|
new_innerpuzzle.get_tree_hash(),
|
|
309
|
--
|
|
309
|
++
vote_amount,
|
|
310
310
|
[standard_inner_puz.get_tree_hash()],
|
|
311
311
|
),
|
|
312
312
|
]
|
|
@@@ -646,7 -642,7 +646,7 @@@ class DAOWallet
|
|
|
646
646
|
|
|
647
647
|
genesis_launcher_puz = SINGLETON_LAUNCHER
|
|
648
648
|
# launcher coin contains singleton launcher, launcher coin ID == singleton_id == treasury_id
|
|
649
|
--
launcher_coin = Coin(origin.name(), genesis_launcher_puz.get_tree_hash(), 1)
|
|
649
|
++
launcher_coin = Coin(origin.name(), genesis_launcher_puz.get_tree_hash(), uint64(1))
|
|
650
650
|
|
|
651
651
|
if cat_tail_hash is None:
|
|
652
652
|
assert amount_of_cats_to_create is not None
|
|
@@@ -3,6 -3,6 +3,7 @@@ from __future__ import annotation
|
|
|
3
3
|
from typing import Iterator, List, Tuple, Union
|
|
4
4
|
|
|
5
5
|
from chia.types.blockchain_format.program import Program
|
|
6
|
++
from chia.types.blockchain_format.serialized_program import SerializedProgram
|
|
6
7
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
7
8
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
8
9
|
from chia.util.ints import uint64
|
|
@@@ -39,7 -39,7 +40,7 @@@ def create_host_layer_puzzle(innerpuz:
|
|
|
39
40
|
)
|
|
40
41
|
|
|
41
42
|
|
|
42
|
--
def match_dl_singleton(puzzle: Program) -> Tuple[bool, Iterator[Program]]:
|
|
43
|
++
def match_dl_singleton(puzzle: Union[Program, SerializedProgram]) -> Tuple[bool, Iterator[Program]]:
|
|
43
44
|
"""
|
|
44
45
|
Given a puzzle test if it's a CAT and, if it is, return the curried arguments
|
|
45
46
|
"""
|
|
@@@ -1365,17 -1403,17 +1365,20 @@@ class DIDWallet
|
|
|
1365
1365
|
return True
|
|
1366
1366
|
|
|
1367
1367
|
async def update_metadata(self, metadata: Dict[str, str]) -> bool:
|
|
1368
|
-
|
|
1369
|
-
|
|
1370
|
-
|
|
1371
|
-
|
|
1372
|
-
self.did_info.
|
|
1373
|
-
self.did_info.
|
|
1374
|
-
self.did_info.
|
|
1375
|
-
self.did_info.
|
|
1376
|
-
self.did_info.
|
|
1377
|
-
self.did_info.
|
|
1378
|
-
|
|
1368
|
++
# validate metadata
|
|
1369
|
++
if not all(isinstance(k, str) and isinstance(v, str) for k, v in metadata.items()):
|
|
1370
|
++
raise ValueError("Metadata key value pairs must be strings.")
|
|
1371
|
+
did_info = DIDInfo(
|
|
1372
|
+
origin_coin=self.did_info.origin_coin,
|
|
1373
|
+
backup_ids=self.did_info.backup_ids,
|
|
1374
|
+
num_of_backup_ids_needed=self.did_info.num_of_backup_ids_needed,
|
|
1375
|
+
parent_info=self.did_info.parent_info,
|
|
1376
|
+
current_inner=self.did_info.current_inner,
|
|
1377
|
+
temp_coin=self.did_info.temp_coin,
|
|
1378
|
+
temp_puzhash=self.did_info.temp_puzhash,
|
|
1379
|
+
temp_pubkey=self.did_info.temp_pubkey,
|
|
1380
|
+
sent_recovery_transaction=self.did_info.sent_recovery_transaction,
|
|
1381
|
+
metadata=json.dumps(metadata),
|
|
1379
1382
|
)
|
|
1380
1383
|
await self.save_info(did_info)
|
|
1381
1384
|
await self.wallet_state_manager.update_wallet_puzzle_hashes(self.wallet_info.id)
|
|
@@@ -18,9 -18,9 +18,9 @@@ class KeyValStore
|
|
|
18
18
|
self.db_wrapper = db_wrapper
|
|
19
19
|
async with self.db_wrapper.writer_maybe_transaction() as conn:
|
|
20
20
|
await conn.execute("CREATE TABLE IF NOT EXISTS key_val_store(key text PRIMARY KEY, value blob)")
|
|
21
|
--
|
|
22
|
--
|
|
23
|
--
|
|
21
|
++
# Remove an old redundant index on the primary key
|
|
22
|
++
# See https://github.com/Chia-Network/chia-blockchain/issues/10276
|
|
23
|
++
await conn.execute("DROP INDEX IF EXISTS key_val_name")
|
|
24
24
|
return self
|
|
25
25
|
|
|
26
26
|
async def get_object(self, key: str, object_type: Any) -> Any:
|
|
@@@ -104,7 -104,7 +104,7 @@@ def decode_info_value(cls: Any, value:
|
|
|
104
104
|
else:
|
|
105
105
|
if value == "()": # special case
|
|
106
106
|
return Program.to([])
|
|
107
|
--
expression: SExp = assemble(value)
|
|
107
|
++
expression: SExp = assemble(value)
|
|
108
108
|
if expression.atom is None:
|
|
109
109
|
return Program(expression)
|
|
110
110
|
else:
|
|
@@@ -6,8 -6,8 +6,9 @@@ from typing import Any, List, Optional
|
|
|
6
6
|
from chia.consensus.default_constants import DEFAULT_CONSTANTS
|
|
7
7
|
from chia.types.blockchain_format.coin import Coin
|
|
8
8
|
from chia.types.blockchain_format.program import Program
|
|
9
|
++
from chia.types.blockchain_format.serialized_program import SerializedProgram
|
|
9
10
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
10
|
-
from chia.types.coin_spend import CoinSpend
|
|
11
|
+
from chia.types.coin_spend import CoinSpend, make_spend
|
|
11
12
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
12
13
|
from chia.util.condition_tools import conditions_for_solution
|
|
13
14
|
from chia.util.ints import uint64
|
|
@@@ -121,17 -121,17 +122,15 @@@ def create_merkle_solution
|
|
|
121
122
|
|
|
122
123
|
|
|
123
124
|
def match_clawback_puzzle(
|
|
124
|
--
uncurried: UncurriedPuzzle,
|
|
125
|
++
uncurried: UncurriedPuzzle,
|
|
126
|
++
inner_puzzle: Union[Program, SerializedProgram],
|
|
127
|
++
inner_solution: Union[Program, SerializedProgram],
|
|
125
128
|
) -> Optional[ClawbackMetadata]:
|
|
126
129
|
# Check if the inner puzzle is a P2 puzzle
|
|
127
130
|
if MOD != uncurried.mod:
|
|
128
131
|
return None
|
|
129
132
|
# Fetch Remark condition
|
|
130
|
--
conditions = conditions_for_solution(
|
|
131
|
--
inner_puzzle,
|
|
132
|
--
inner_solution,
|
|
133
|
--
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM // 8,
|
|
134
|
--
)
|
|
133
|
++
conditions = conditions_for_solution(inner_puzzle, inner_solution, DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM // 8)
|
|
135
134
|
metadata: Optional[ClawbackMetadata] = None
|
|
136
135
|
new_puzhash: Set[bytes32] = set()
|
|
137
136
|
if conditions is not None:
|
|
@@@ -29,7 -29,7 +29,7 @@@ def make_puzzle(amount: int) -> int
|
|
|
29
29
|
# print(puzzle)
|
|
30
30
|
|
|
31
31
|
# TODO: properly type hint clvm_tools
|
|
32
|
--
assembled_puzzle = binutils.assemble(puzzle)
|
|
32
|
++
assembled_puzzle = binutils.assemble(puzzle)
|
|
33
33
|
puzzle_prog = Program.to(assembled_puzzle)
|
|
34
34
|
print("Program: ", puzzle_prog)
|
|
35
35
|
puzzle_hash = puzzle_prog.get_tree_hash()
|
|
@@@ -52,15 -51,11 +52,11 @@@ async def main() -> None
|
|
|
52
52
|
ph1 = decode_puzzle_hash(address1)
|
|
53
53
|
ph2 = decode_puzzle_hash(address2)
|
|
54
54
|
|
|
55
|
-
p_farmer_2 =
|
|
56
|
-
binutils.assemble(f"(q . ((51 0x{ph1.hex()} {farmer_amounts}) (51 0x{ph2.hex()} {farmer_amounts})))")
|
|
55
|
+
p_farmer_2 = SerializedProgram.to(
|
|
57
|
-
binutils.assemble(
|
|
58
|
-
f"(q . ((51 0x{ph1.hex()} {farmer_amounts}) " f"(51 0x{ph2.hex()} {farmer_amounts})))"
|
|
59
|
-
) # type: ignore[no-untyped-call]
|
|
56
|
++
binutils.assemble(f"(q . ((51 0x{ph1.hex()} {farmer_amounts}) " f"(51 0x{ph2.hex()} {farmer_amounts})))")
|
|
60
57
|
)
|
|
61
|
-
p_pool_2 =
|
|
62
|
-
binutils.assemble(f"(q . ((51 0x{ph1.hex()} {pool_amounts}) (51 0x{ph2.hex()} {pool_amounts})))")
|
|
58
|
+
p_pool_2 = SerializedProgram.to(
|
|
63
|
-
binutils.assemble(
|
|
64
|
-
f"(q . ((51 0x{ph1.hex()} {pool_amounts}) " f"(51 0x{ph2.hex()} {pool_amounts})))"
|
|
65
|
-
) # type: ignore[no-untyped-call]
|
|
59
|
++
binutils.assemble(f"(q . ((51 0x{ph1.hex()} {pool_amounts}) " f"(51 0x{ph2.hex()} {pool_amounts})))")
|
|
66
60
|
)
|
|
67
61
|
|
|
68
62
|
print(f"Ph1: {ph1.hex()}")
|
|
@@@ -68,7 -63,7 +64,7 @@@
|
|
|
68
64
|
assert ph1.hex() == "1b7ab2079fa635554ad9bd4812c622e46ee3b1875a7813afba127bb0cc9794f9"
|
|
69
65
|
assert ph2.hex() == "6f184a7074c925ef8688ce56941eb8929be320265f824ec7e351356cc745d38a"
|
|
70
66
|
|
|
71
|
-
p_solution = SerializedProgram.to(binutils.assemble("()"))
|
|
72
|
-
p_solution = Program.to(binutils.assemble("()"))
|
|
67
|
++
p_solution = SerializedProgram.to(binutils.assemble("()"))
|
|
73
68
|
|
|
74
69
|
sb_farmer = SpendBundle([CoinSpend(farmer_prefarm, p_farmer_2, p_solution)], G2Element())
|
|
75
70
|
sb_pool = SpendBundle([CoinSpend(pool_prefarm, p_pool_2, p_solution)], G2Element())
|
|
@@@ -152,7 -153,7 +152,8 @@@ class TradeStore
|
|
|
152
152
|
|
|
153
153
|
await conn.execute("CREATE INDEX IF NOT EXISTS trade_confirmed_index on trade_records(confirmed_at_index)")
|
|
154
154
|
await conn.execute("CREATE INDEX IF NOT EXISTS trade_status on trade_records(status)")
|
|
155
|
--
|
|
155
|
++
# Remove an old redundant index on the primary key
|
|
156
|
++
await conn.execute("DROP INDEX IF EXISTS trade_id")
|
|
156
157
|
|
|
157
158
|
if needs_is_my_offer_migration:
|
|
158
159
|
await migrate_is_my_offer(self.log, conn)
|
|
@@@ -33,10 -33,10 +33,10 @@@ from chia.protocols.wallet_protocol imp
|
|
|
33
33
|
from chia.server.ws_connection import WSChiaConnection
|
|
34
34
|
from chia.types.blockchain_format.coin import Coin, hash_coin_ids
|
|
35
35
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
36
|
-
from chia.types.coin_spend import CoinSpend
|
|
36
|
+
from chia.types.coin_spend import CoinSpend, make_spend
|
|
37
37
|
from chia.types.header_block import HeaderBlock
|
|
38
38
|
from chia.util.ints import uint32
|
|
39
|
--
from chia.util.merkle_set import
|
|
39
|
++
from chia.util.merkle_set import confirm_included_already_hashed, confirm_not_included_already_hashed
|
|
40
40
|
from chia.wallet.util.peer_request_cache import PeerRequestCache
|
|
41
41
|
|
|
42
42
|
log = logging.getLogger(__name__)
|
|
@@@ -151,12 -151,12 +151,8 @@@ def validate_removals
|
|
|
151
151
|
# we must find the ones relevant to our wallets.
|
|
152
152
|
|
|
153
153
|
# Verify removals root
|
|
154
|
--
|
|
155
|
--
|
|
156
|
--
_, coin = name_coin
|
|
157
|
--
if coin is not None:
|
|
158
|
--
removals_merkle_set.add_already_hashed(coin.name())
|
|
159
|
--
removals_root = removals_merkle_set.get_root()
|
|
154
|
++
removals_items = [name for name, coin in coins if coin is not None]
|
|
155
|
++
removals_root = bytes32(compute_merkle_set_root(removals_items))
|
|
160
156
|
if root != removals_root:
|
|
161
157
|
return False
|
|
162
158
|
else:
|
|
@@@ -202,7 -202,7 +198,6 @@@ async def request_and_validate_removals
|
|
|
202
198
|
)
|
|
203
199
|
if removals_res is None or isinstance(removals_res, RejectRemovalsRequest):
|
|
204
200
|
return False
|
|
205
|
--
assert removals_res.proofs is not None
|
|
206
201
|
return validate_removals(removals_res.coins, removals_res.proofs, removals_root)
|
|
207
202
|
|
|
208
203
|
|
|
@@@ -99,8 -99,8 +99,8 @@@ class WalletBlockchain(BlockchainInterf
|
|
|
99
99
|
and block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters is not None
|
|
100
100
|
):
|
|
101
101
|
assert block.finished_sub_slots[0].challenge_chain.new_difficulty is not None # They both change together
|
|
102
|
-
sub_slot_iters =
|
|
103
|
-
difficulty =
|
|
104
|
-
sub_slot_iters: uint64 = block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters
|
|
105
|
-
difficulty: uint64 = block.finished_sub_slots[0].challenge_chain.new_difficulty
|
|
102
|
++
sub_slot_iters = block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters
|
|
103
|
++
difficulty = block.finished_sub_slots[0].challenge_chain.new_difficulty
|
|
106
104
|
else:
|
|
107
105
|
sub_slot_iters = self._sub_slot_iters
|
|
108
106
|
difficulty = self._difficulty
|
|
@@@ -49,16 -49,16 +49,17 @@@ class WalletPuzzleStore
|
|
|
49
49
|
await conn.execute(
|
|
50
50
|
"CREATE INDEX IF NOT EXISTS derivation_index_index on derivation_paths(derivation_index)"
|
|
51
51
|
)
|
|
52
|
--
|
|
53
|
--
await conn.execute("CREATE INDEX IF NOT EXISTS ph on derivation_paths(puzzle_hash)")
|
|
54
|
--
|
|
55
52
|
await conn.execute("CREATE INDEX IF NOT EXISTS pubkey on derivation_paths(pubkey)")
|
|
56
|
--
|
|
53
|
++
await conn.execute("CREATE INDEX IF NOT EXISTS ph on derivation_paths(puzzle_hash)")
|
|
57
54
|
await conn.execute("CREATE INDEX IF NOT EXISTS wallet_type on derivation_paths(wallet_type)")
|
|
58
|
--
|
|
55
|
++
# Remove an old, misnamed, redundant index on `wallet_type`
|
|
56
|
++
# See https://github.com/Chia-Network/chia-blockchain/issues/10276
|
|
57
|
++
await conn.execute("DROP INDEX IF EXISTS used")
|
|
59
58
|
await conn.execute("CREATE INDEX IF NOT EXISTS derivation_paths_wallet_id on derivation_paths(wallet_id)")
|
|
60
|
--
|
|
61
|
--
await conn.execute(
|
|
59
|
++
await conn.execute("CREATE INDEX IF NOT EXISTS derivation_paths_used_index on derivation_paths(used)")
|
|
60
|
++
await conn.execute(
|
|
61
|
++
"CREATE INDEX IF NOT EXISTS derivation_paths_hardened_index on derivation_paths(hardened)"
|
|
62
|
++
)
|
|
62
63
|
|
|
63
64
|
# the lock is locked by the users of this class
|
|
64
65
|
self.lock = asyncio.Lock()
|
|
@@@ -245,26 -245,26 +246,6 @@@
|
|
|
245
246
|
|
|
246
247
|
return None
|
|
247
248
|
|
|
248
|
--
async def index_for_puzzle_hash_and_wallet(self, puzzle_hash: bytes32, wallet_id: uint32) -> Optional[uint32]:
|
|
249
|
--
"""
|
|
250
|
--
Returns the derivation path for the puzzle_hash.
|
|
251
|
--
Returns None if not present.
|
|
252
|
--
"""
|
|
253
|
--
async with self.db_wrapper.reader_no_transaction() as conn:
|
|
254
|
--
row = await execute_fetchone(
|
|
255
|
--
conn,
|
|
256
|
--
"SELECT derivation_index FROM derivation_paths WHERE puzzle_hash=? AND wallet_id=?;",
|
|
257
|
--
(
|
|
258
|
--
puzzle_hash.hex(),
|
|
259
|
--
wallet_id,
|
|
260
|
--
),
|
|
261
|
--
)
|
|
262
|
--
|
|
263
|
--
if row is not None:
|
|
264
|
--
return uint32(row[0])
|
|
265
|
--
|
|
266
|
--
return None
|
|
267
|
--
|
|
268
249
|
async def get_wallet_identifier_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[WalletIdentifier]:
|
|
269
250
|
"""
|
|
270
251
|
Returns the derivation path for the puzzle_hash.
|
|
@@@ -65,22 -65,22 +65,20 @@@ class WalletTransactionStore
|
|
|
65
65
|
await conn.execute(
|
|
66
66
|
"CREATE INDEX IF NOT EXISTS tx_confirmed_index on transaction_record(confirmed_at_height)"
|
|
67
67
|
)
|
|
68
|
--
|
|
69
68
|
await conn.execute("CREATE INDEX IF NOT EXISTS tx_created_index on transaction_record(created_at_time)")
|
|
70
|
--
|
|
69
|
++
# Remove a redundant index on `created_at_time`
|
|
70
|
++
# See https://github.com/Chia-Network/chia-blockchain/issues/10276
|
|
71
|
++
await conn.execute("DROP INDEX IF EXISTS tx_created_time")
|
|
72
|
++
await conn.execute("CREATE INDEX IF NOT EXISTS tx_to_puzzle_hash on transaction_record(to_puzzle_hash)")
|
|
71
73
|
await conn.execute("CREATE INDEX IF NOT EXISTS tx_confirmed on transaction_record(confirmed)")
|
|
72
|
--
|
|
73
74
|
await conn.execute("CREATE INDEX IF NOT EXISTS tx_sent on transaction_record(sent)")
|
|
74
|
--
|
|
75
|
--
await conn.execute("CREATE INDEX IF NOT EXISTS tx_created_time on transaction_record(created_at_time)")
|
|
76
|
--
|
|
77
|
--
await conn.execute("CREATE INDEX IF NOT EXISTS tx_type on transaction_record(type)")
|
|
78
|
--
|
|
79
|
--
await conn.execute("CREATE INDEX IF NOT EXISTS tx_to_puzzle_hash on transaction_record(to_puzzle_hash)")
|
|
80
|
--
|
|
81
75
|
await conn.execute(
|
|
82
76
|
"CREATE INDEX IF NOT EXISTS transaction_record_wallet_id on transaction_record(wallet_id)"
|
|
83
77
|
)
|
|
78
|
++
await conn.execute(
|
|
79
|
++
"CREATE INDEX IF NOT EXISTS transaction_record_trade_id_idx ON transaction_record(trade_id)"
|
|
80
|
++
)
|
|
81
|
++
await conn.execute("CREATE INDEX IF NOT EXISTS tx_type on transaction_record(type)")
|
|
84
82
|
|
|
85
83
|
try:
|
|
86
84
|
await conn.execute("CREATE TABLE tx_times(txid blob PRIMARY KEY, valid_times blob)")
|
|
@@@ -102,19 -102,19 +100,28 @@@
|
|
|
102
100
|
Store TransactionRecord in DB and Cache.
|
|
103
101
|
"""
|
|
104
102
|
async with self.db_wrapper.writer_maybe_transaction() as conn:
|
|
103
|
++
transaction_record_old = TransactionRecordOld(
|
|
104
|
++
confirmed_at_height=record.confirmed_at_height,
|
|
105
|
++
created_at_time=record.created_at_time,
|
|
106
|
++
to_puzzle_hash=record.to_puzzle_hash,
|
|
107
|
++
amount=record.amount,
|
|
108
|
++
fee_amount=record.fee_amount,
|
|
109
|
++
confirmed=record.confirmed,
|
|
110
|
++
sent=record.sent,
|
|
111
|
++
spend_bundle=record.spend_bundle,
|
|
112
|
++
additions=record.additions,
|
|
113
|
++
removals=record.removals,
|
|
114
|
++
wallet_id=record.wallet_id,
|
|
115
|
++
sent_to=record.sent_to,
|
|
116
|
++
trade_id=record.trade_id,
|
|
117
|
++
type=record.type,
|
|
118
|
++
name=record.name,
|
|
119
|
++
memos=record.memos,
|
|
120
|
++
)
|
|
105
121
|
await conn.execute_insert(
|
|
106
122
|
"INSERT OR REPLACE INTO transaction_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
|
107
123
|
(
|
|
108
|
--
bytes(
|
|
109
|
--
TransactionRecordOld(
|
|
110
|
--
spend_bundle=record.spend_bundle,
|
|
111
|
--
**{
|
|
112
|
--
k: v
|
|
113
|
--
for k, v in dataclasses.asdict(record).items()
|
|
114
|
--
if k not in ("valid_times", "spend_bundle")
|
|
115
|
--
},
|
|
116
|
--
)
|
|
117
|
--
),
|
|
124
|
++
bytes(transaction_record_old),
|
|
118
125
|
record.name,
|
|
119
126
|
record.confirmed_at_height,
|
|
120
127
|
record.created_at_time,
|
|
@@@ -129,11 -129,11 +136,7 @@@
|
|
|
129
136
|
),
|
|
130
137
|
)
|
|
131
138
|
await conn.execute_insert(
|
|
132
|
--
"INSERT OR REPLACE INTO tx_times
|
|
133
|
--
(
|
|
134
|
--
record.name,
|
|
135
|
--
bytes(record.valid_times),
|
|
136
|
--
),
|
|
139
|
++
"INSERT OR REPLACE INTO tx_times VALUES (?, ?)", (record.name, bytes(record.valid_times))
|
|
137
140
|
)
|
|
138
141
|
|
|
139
142
|
async def delete_transaction_record(self, tx_id: bytes32) -> None:
|
|
@@@ -426,27 -426,27 +429,42 @@@
|
|
|
426
429
|
).close()
|
|
427
430
|
|
|
428
431
|
async def _get_new_tx_records_from_old(self, old_records: List[TransactionRecordOld]) -> List[TransactionRecord]:
|
|
432
|
++
tx_id_to_valid_times: Dict[bytes, ConditionValidTimes] = {}
|
|
433
|
++
empty_valid_times = ConditionValidTimes()
|
|
429
434
|
async with self.db_wrapper.reader_no_transaction() as conn:
|
|
430
|
--
valid_times: Dict[bytes32, ConditionValidTimes] = {}
|
|
431
435
|
chunked_records: List[List[TransactionRecordOld]] = [
|
|
432
436
|
old_records[i : min(len(old_records), i + self.db_wrapper.host_parameter_limit)]
|
|
433
437
|
for i in range(0, len(old_records), self.db_wrapper.host_parameter_limit)
|
|
434
438
|
]
|
|
435
439
|
for records_chunk in chunked_records:
|
|
436
440
|
cursor = await conn.execute(
|
|
437
|
--
f"SELECT txid, valid_times from tx_times WHERE txid IN ({','.join('?' *
|
|
441
|
++
f"SELECT txid, valid_times from tx_times WHERE txid IN ({','.join('?' * len(records_chunk))})",
|
|
438
442
|
tuple(tx.name for tx in records_chunk),
|
|
439
443
|
)
|
|
440
|
--
|
|
441
|
--
|
|
442
|
--
**{bytes32(res[0]): ConditionValidTimes.from_bytes(res[1]) for res in await cursor.fetchall()},
|
|
443
|
--
}
|
|
444
|
++
for row in await cursor.fetchall():
|
|
445
|
++
tx_id_to_valid_times[row[0]] = ConditionValidTimes.from_bytes(row[1])
|
|
444
446
|
await cursor.close()
|
|
445
447
|
return [
|
|
446
448
|
TransactionRecord(
|
|
447
|
--
|
|
449
|
++
confirmed_at_height=record.confirmed_at_height,
|
|
450
|
++
created_at_time=record.created_at_time,
|
|
451
|
++
to_puzzle_hash=record.to_puzzle_hash,
|
|
452
|
++
amount=record.amount,
|
|
453
|
++
fee_amount=record.fee_amount,
|
|
454
|
++
confirmed=record.confirmed,
|
|
455
|
++
sent=record.sent,
|
|
448
456
|
spend_bundle=record.spend_bundle,
|
|
449
|
--
|
|
457
|
++
additions=record.additions,
|
|
458
|
++
removals=record.removals,
|
|
459
|
++
wallet_id=record.wallet_id,
|
|
460
|
++
sent_to=record.sent_to,
|
|
461
|
++
trade_id=record.trade_id,
|
|
462
|
++
type=record.type,
|
|
463
|
++
name=record.name,
|
|
464
|
++
memos=record.memos,
|
|
465
|
++
valid_times=(
|
|
466
|
++
tx_id_to_valid_times[record.name] if record.name in tx_id_to_valid_times else empty_valid_times
|
|
467
|
++
),
|
|
450
468
|
)
|
|
451
469
|
for record in old_records
|
|
452
470
|
]
|
|
@@@ -5,12 -5,12 +5,12 @@@ set -o errexi
|
|
|
5
5
|
USAGE_TEXT="\
|
|
6
6
|
Usage: $0 [-adilpsh]
|
|
7
7
|
|
|
8
|
--
-a
|
|
8
|
++
-a ignored for compatibility with earlier versions
|
|
9
9
|
-d install development dependencies
|
|
10
10
|
-i install non-editable
|
|
11
11
|
-l install legacy keyring dependencies (linux only)
|
|
12
12
|
-p additional plotters installation
|
|
13
|
--
-s
|
|
13
|
++
-s ignored for compatibility with earlier versions
|
|
14
14
|
-h display this help and exit
|
|
15
15
|
"
|
|
16
16
|
|
|
@@@ -18,9 -18,9 +18,7 @@@ usage()
|
|
|
18
18
|
echo "${USAGE_TEXT}"
|
|
19
19
|
}
|
|
20
20
|
|
|
21
|
--
PACMAN_AUTOMATED=
|
|
22
21
|
EXTRAS=
|
|
23
|
--
SKIP_PACKAGE_INSTALL=
|
|
24
22
|
PLOTTER_INSTALL=
|
|
25
23
|
EDITABLE='-e'
|
|
26
24
|
|
|
@@@ -28,7 -28,7 +26,7 @@@ while getopts adilpsh fla
|
|
|
28
26
|
do
|
|
29
27
|
case "${flag}" in
|
|
30
28
|
# automated
|
|
31
|
--
a)
|
|
29
|
++
a) :;;
|
|
32
30
|
# development
|
|
33
31
|
d) EXTRAS=${EXTRAS}dev,;;
|
|
34
32
|
# non-editable
|
|
@@@ -37,26 -37,26 +35,12 @@@
|
|
|
37
35
|
l) EXTRAS=${EXTRAS}legacy_keyring,;;
|
|
38
36
|
p) PLOTTER_INSTALL=1;;
|
|
39
37
|
# simple install
|
|
40
|
--
s)
|
|
38
|
++
s) :;;
|
|
41
39
|
h) usage; exit 0;;
|
|
42
40
|
*) echo; usage; exit 1;;
|
|
43
41
|
esac
|
|
44
42
|
done
|
|
45
43
|
|
|
46
|
--
UBUNTU=false
|
|
47
|
--
DEBIAN=false
|
|
48
|
--
if [ "$(uname)" = "Linux" ]; then
|
|
49
|
--
#LINUX=1
|
|
50
|
--
if command -v apt-get >/dev/null; then
|
|
51
|
--
OS_ID=$(lsb_release -is)
|
|
52
|
--
if [ "$OS_ID" = "Debian" ]; then
|
|
53
|
--
DEBIAN=true
|
|
54
|
--
else
|
|
55
|
--
UBUNTU=true
|
|
56
|
--
fi
|
|
57
|
--
fi
|
|
58
|
--
fi
|
|
59
|
--
|
|
60
44
|
# Check for non 64 bit ARM64/Raspberry Pi installs
|
|
61
45
|
if [ "$(uname -m)" = "armv7l" ]; then
|
|
62
46
|
echo ""
|
|
@@@ -70,65 -70,65 +54,6 @@@ f
|
|
|
70
54
|
# Get submodules
|
|
71
55
|
git submodule update --init mozilla-ca
|
|
72
56
|
|
|
73
|
--
UBUNTU_PRE_20=0
|
|
74
|
--
UBUNTU_20=0
|
|
75
|
--
|
|
76
|
--
if $UBUNTU; then
|
|
77
|
--
LSB_RELEASE=$(lsb_release -rs)
|
|
78
|
--
# In case Ubuntu minimal does not come with bc
|
|
79
|
--
if ! command -v bc > /dev/null 2>&1; then
|
|
80
|
--
sudo apt install bc -y
|
|
81
|
--
fi
|
|
82
|
--
# Mint 20.04 responds with 20 here so 20 instead of 20.04
|
|
83
|
--
if [ "$(echo "$LSB_RELEASE<20" | bc)" = "1" ]; then
|
|
84
|
--
UBUNTU_PRE_20=1
|
|
85
|
--
else
|
|
86
|
--
UBUNTU_20=1
|
|
87
|
--
fi
|
|
88
|
--
fi
|
|
89
|
--
|
|
90
|
--
install_python3_and_sqlite3_from_source_with_yum() {
|
|
91
|
--
CURRENT_WD=$(pwd)
|
|
92
|
--
TMP_PATH=/tmp
|
|
93
|
--
|
|
94
|
--
# Preparing installing Python
|
|
95
|
--
echo 'yum groupinstall -y "Development Tools"'
|
|
96
|
--
sudo yum groupinstall -y "Development Tools"
|
|
97
|
--
echo "sudo yum install -y openssl-devel openssl libffi-devel bzip2-devel wget"
|
|
98
|
--
sudo yum install -y openssl-devel openssl libffi-devel bzip2-devel wget
|
|
99
|
--
|
|
100
|
--
echo "cd $TMP_PATH"
|
|
101
|
--
cd "$TMP_PATH"
|
|
102
|
--
# Install sqlite>=3.37
|
|
103
|
--
# yum install sqlite-devel brings sqlite3.7 which is not compatible with chia
|
|
104
|
--
echo "wget https://www.sqlite.org/2022/sqlite-autoconf-3370200.tar.gz"
|
|
105
|
--
wget https://www.sqlite.org/2022/sqlite-autoconf-3370200.tar.gz
|
|
106
|
--
tar xf sqlite-autoconf-3370200.tar.gz
|
|
107
|
--
echo "cd sqlite-autoconf-3370200"
|
|
108
|
--
cd sqlite-autoconf-3370200
|
|
109
|
--
echo "./configure --prefix=/usr/local"
|
|
110
|
--
# '| stdbuf ...' seems weird but this makes command outputs stay in single line.
|
|
111
|
--
./configure --prefix=/usr/local | stdbuf -o0 cut -b1-"$(tput cols)" | sed -u 'i\\o033[2K' | stdbuf -o0 tr '\n' '\r'; echo
|
|
112
|
--
echo "make -j$(nproc)"
|
|
113
|
--
make -j"$(nproc)" | stdbuf -o0 cut -b1-"$(tput cols)" | sed -u 'i\\o033[2K' | stdbuf -o0 tr '\n' '\r'; echo
|
|
114
|
--
echo "sudo make install"
|
|
115
|
--
sudo make install | stdbuf -o0 cut -b1-"$(tput cols)" | sed -u 'i\\o033[2K' | stdbuf -o0 tr '\n' '\r'; echo
|
|
116
|
--
# yum install python3 brings Python3.6 which is not supported by chia
|
|
117
|
--
cd ..
|
|
118
|
--
echo "wget https://www.python.org/ftp/python/3.9.11/Python-3.9.11.tgz"
|
|
119
|
--
wget https://www.python.org/ftp/python/3.9.11/Python-3.9.11.tgz
|
|
120
|
--
tar xf Python-3.9.11.tgz
|
|
121
|
--
echo "cd Python-3.9.11"
|
|
122
|
--
cd Python-3.9.11
|
|
123
|
--
echo "LD_RUN_PATH=/usr/local/lib ./configure --prefix=/usr/local"
|
|
124
|
--
# '| stdbuf ...' seems weird but this makes command outputs stay in single line.
|
|
125
|
--
LD_RUN_PATH=/usr/local/lib ./configure --prefix=/usr/local | stdbuf -o0 cut -b1-"$(tput cols)" | sed -u 'i\\o033[2K' | stdbuf -o0 tr '\n' '\r'; echo
|
|
126
|
--
echo "LD_RUN_PATH=/usr/local/lib make -j$(nproc)"
|
|
127
|
--
LD_RUN_PATH=/usr/local/lib make -j"$(nproc)" | stdbuf -o0 cut -b1-"$(tput cols)" | sed -u 'i\\o033[2K' | stdbuf -o0 tr '\n' '\r'; echo
|
|
128
|
--
echo "LD_RUN_PATH=/usr/local/lib sudo make altinstall"
|
|
129
|
--
LD_RUN_PATH=/usr/local/lib sudo make altinstall | stdbuf -o0 cut -b1-"$(tput cols)" | sed -u 'i\\o033[2K' | stdbuf -o0 tr '\n' '\r'; echo
|
|
130
|
--
cd "$CURRENT_WD"
|
|
131
|
--
}
|
|
132
57
|
|
|
133
58
|
# You can specify preferred python version by exporting `INSTALL_PYTHON_VERSION`
|
|
134
59
|
# e.g. `export INSTALL_PYTHON_VERSION=3.8`
|
|
@@@ -183,75 -183,73 +108,6 @@@ find_openssl()
|
|
|
183
108
|
set -e
|
|
184
109
|
}
|
|
185
110
|
|
|
186
|
--
# Manage npm and other install requirements on an OS specific basis
|
|
187
|
--
if [ "$SKIP_PACKAGE_INSTALL" = "1" ]; then
|
|
188
|
--
echo "Skipping system package installation"
|
|
189
|
--
elif [ "$(uname)" = "Linux" ]; then
|
|
190
|
--
#LINUX=1
|
|
191
|
--
if [ "$UBUNTU_PRE_20" = "1" ]; then
|
|
192
|
--
# Ubuntu
|
|
193
|
--
echo "Installing on Ubuntu pre 20.*."
|
|
194
|
--
sudo apt-get update
|
|
195
|
--
# distutils must be installed as well to avoid a complaint about ensurepip while
|
|
196
|
--
# creating the venv. This may be related to a mis-check while using or
|
|
197
|
--
# misconfiguration of the secondary Python version 3.7. The primary is Python 3.6.
|
|
198
|
--
sudo apt-get install -y python3.7-venv python3.7-distutils openssl
|
|
199
|
--
elif [ "$UBUNTU_20" = "1" ]; then
|
|
200
|
--
echo "Installing on Ubuntu 20.* or newer."
|
|
201
|
--
sudo apt-get update
|
|
202
|
--
sudo apt-get install -y python3-venv openssl
|
|
203
|
--
elif [ "$DEBIAN" = "true" ]; then
|
|
204
|
--
echo "Installing on Debian."
|
|
205
|
--
sudo apt-get update
|
|
206
|
--
sudo apt-get install -y python3-venv openssl
|
|
207
|
--
elif type pacman >/dev/null 2>&1 && [ -f "/etc/arch-release" ]; then
|
|
208
|
--
# Arch Linux
|
|
209
|
--
# Arch provides latest python version. User will need to manually install python 3.9 if it is not present
|
|
210
|
--
echo "Installing on Arch Linux."
|
|
211
|
--
case $(uname -m) in
|
|
212
|
--
x86_64|aarch64)
|
|
213
|
-
if ! pacman -Qs "^git$" > /dev/null || ! pacman -Qs "^openssl$" > /dev/null ; then
|
|
214
|
-
sudo pacman ${PACMAN_AUTOMATED} -S --needed git openssl
|
|
215
|
-
fi
|
|
216
|
-
sudo pacman ${PACMAN_AUTOMATED} -S --needed git openssl
|
|
217
|
--
;;
|
|
218
|
--
*)
|
|
219
|
--
echo "Incompatible CPU architecture. Must be x86_64 or aarch64."
|
|
220
|
--
exit 1
|
|
221
|
--
;;
|
|
222
|
--
esac
|
|
223
|
--
elif type yum >/dev/null 2>&1 && [ ! -f "/etc/redhat-release" ] && [ ! -f "/etc/centos-release" ] && [ ! -f "/etc/fedora-release" ]; then
|
|
224
|
--
# AMZN 2
|
|
225
|
--
echo "Installing on Amazon Linux 2."
|
|
226
|
--
if ! command -v python3.9 >/dev/null 2>&1; then
|
|
227
|
--
install_python3_and_sqlite3_from_source_with_yum
|
|
228
|
--
fi
|
|
229
|
--
elif type yum >/dev/null 2>&1 && [ -f "/etc/centos-release" ]; then
|
|
230
|
--
# CentOS
|
|
231
|
--
echo "Install on CentOS."
|
|
232
|
--
if ! command -v python3.9 >/dev/null 2>&1; then
|
|
233
|
--
install_python3_and_sqlite3_from_source_with_yum
|
|
234
|
--
fi
|
|
235
|
--
elif type yum >/dev/null 2>&1 && [ -f "/etc/redhat-release" ] && grep Rocky /etc/redhat-release; then
|
|
236
|
--
echo "Installing on Rocky."
|
|
237
|
--
# TODO: make this smarter about getting the latest version
|
|
238
|
--
sudo yum install --assumeyes python39 openssl
|
|
239
|
--
elif type yum >/dev/null 2>&1 && [ -f "/etc/redhat-release" ] || [ -f "/etc/fedora-release" ]; then
|
|
240
|
--
# Redhat or Fedora
|
|
241
|
--
echo "Installing on Redhat/Fedora."
|
|
242
|
--
if ! command -v python3.9 >/dev/null 2>&1; then
|
|
243
|
--
sudo yum install -y python39 openssl
|
|
244
|
--
fi
|
|
245
|
--
fi
|
|
246
|
--
elif [ "$(uname)" = "Darwin" ]; then
|
|
247
|
--
echo "Installing on macOS."
|
|
248
|
--
if ! type brew >/dev/null 2>&1; then
|
|
249
|
--
echo "Installation currently requires brew on macOS - https://brew.sh/"
|
|
250
|
--
exit 1
|
|
251
|
--
fi
|
|
252
|
--
echo "Installing OpenSSL"
|
|
253
|
--
brew install openssl
|
|
254
|
--
fi
|
|
255
|
--
|
|
256
111
|
if [ "$(uname)" = "OpenBSD" ]; then
|
|
257
112
|
export MAKE=${MAKE:-gmake}
|
|
258
113
|
export BUILD_VDF_CLIENT=${BUILD_VDF_CLIENT:-N}
|
|
@@@ -277,11 -275,11 +133,13 @@@ if ! command -v "$INSTALL_PYTHON_PATH"
|
|
|
277
133
|
fi
|
|
278
134
|
|
|
279
135
|
if [ "$PYTHON_MAJOR_VER" -ne "3" ] || [ "$PYTHON_MINOR_VER" -lt "7" ] || [ "$PYTHON_MINOR_VER" -ge "12" ]; then
|
|
280
|
--
echo "Chia requires Python version >= 3.
|
|
136
|
++
echo "Chia requires Python version >= 3.8 and < 3.12.0" >&2
|
|
281
137
|
echo "Current Python version = $INSTALL_PYTHON_VERSION" >&2
|
|
282
138
|
# If Arch, direct to Arch Wiki
|
|
283
139
|
if type pacman >/dev/null 2>&1 && [ -f "/etc/arch-release" ]; then
|
|
284
140
|
echo "Please see https://wiki.archlinux.org/title/python#Old_versions for support." >&2
|
|
141
|
++
else
|
|
142
|
++
echo "Please install python per your OS instructions." >&2
|
|
285
143
|
fi
|
|
286
144
|
|
|
287
145
|
exit 1
|
|
@@@ -292,6 -290,6 +150,7 @@@ find_sqlit
|
|
|
292
150
|
echo "SQLite version for Python is ${SQLITE_VERSION}"
|
|
293
151
|
if [ "$SQLITE_MAJOR_VER" -lt "3" ] || [ "$SQLITE_MAJOR_VER" = "3" ] && [ "$SQLITE_MINOR_VER" -lt "8" ]; then
|
|
294
152
|
echo "Only sqlite>=3.8 is supported"
|
|
153
|
++
echo "Please install sqlite3 per your OS instructions."
|
|
295
154
|
exit 1
|
|
296
155
|
fi
|
|
297
156
|
|
|
@@@ -302,6 -300,6 +161,7 @@@ echo "OpenSSL version for Python is ${O
|
|
|
302
161
|
if [ "$OPENSSL_VERSION_INT" -lt "269488367" ]; then
|
|
303
162
|
echo "WARNING: OpenSSL versions before 3.0.2, 1.1.1n, or 1.0.2zd are vulnerable to CVE-2022-0778"
|
|
304
163
|
echo "Your OS may have patched OpenSSL and not updated the version to 1.1.1n"
|
|
164
|
++
echo "We recommend updating to the latest version of OpenSSL available for your OS"
|
|
305
165
|
fi
|
|
306
166
|
|
|
307
167
|
# If version of `python` and "$INSTALL_PYTHON_VERSION" does not match, clear old version
|
|
@@@ -74,7 -83,7 +74,6 @@@ tests.core.test_daemon_rp
|
|
|
74
74
|
tests.core.test_db_conversion
|
|
75
75
|
tests.core.test_filter
|
|
76
76
|
tests.core.test_full_node_rpc
|
|
77
|
--
tests.core.test_merkle_set
|
|
78
77
|
tests.core.util.test_cached_bls
|
|
79
78
|
tests.core.util.test_config
|
|
80
79
|
tests.core.util.test_file_keyring_synchronization
|
|
@@@ -7,24 -7,24 +7,24 @@@ from setuptools import find_packages, s
|
|
|
7
7
|
|
|
8
8
|
dependencies = [
|
|
9
9
|
"aiofiles==23.2.1", # Async IO for files
|
|
10
|
-
"anyio==4.
|
|
11
|
-
"boto3==1.29.0", # AWS S3 for DL s3 plugin
|
|
12
|
-
"chiavdf==1.1.0", # timelord and vdf verification
|
|
10
|
+
"anyio==4.2.0",
|
|
13
|
-
"boto3==1.34.
|
|
11
|
++
"boto3==1.34.40", # AWS S3 for DL s3 plugin
|
|
12
|
+
"chiavdf==1.1.1", # timelord and vdf verification
|
|
14
13
|
"chiabip158==1.3", # bip158-style wallet filters
|
|
15
14
|
"chiapos==2.0.3", # proof of space
|
|
16
15
|
"clvm==0.9.8",
|
|
17
|
--
"clvm_tools==0.4.
|
|
18
|
-
"chia_rs==0.
|
|
19
|
-
"
|
|
20
|
-
"
|
|
21
|
-
"aiohttp==3.8.6", # HTTP server for full node rpc
|
|
16
|
++
"clvm_tools==0.4.8", # Currying, Program.to, other conveniences
|
|
17
|
++
"chia_rs==0.5.2",
|
|
18
|
+
"clvm-tools-rs==0.1.40", # Rust implementation of clvm_tools' compiler
|
|
19
|
+
"aiohttp==3.9.1", # HTTP server for full node rpc
|
|
22
20
|
"aiosqlite==0.19.0", # asyncio wrapper for sqlite, to store blocks
|
|
23
|
-
"bitstring==4.1.
|
|
21
|
+
"bitstring==4.1.4", # Binary data management library
|
|
24
22
|
"colorama==0.4.6", # Colorizes terminal output
|
|
25
|
-
"colorlog==6.
|
|
26
|
-
"concurrent-log-handler==0.9.
|
|
27
|
-
"cryptography==
|
|
23
|
+
"colorlog==6.8.2", # Adds color to logs
|
|
24
|
+
"concurrent-log-handler==0.9.25", # Concurrently log and rotate logs
|
|
25
|
+
"cryptography==42.0.2", # Python cryptography library for TLS - keyring conflict
|
|
28
26
|
"filelock==3.13.1", # For reading and writing config multiprocess and multithread safely (non-reentrant locks)
|
|
29
|
-
"keyring==
|
|
27
|
+
"keyring==24.3.0", # Store keys in MacOS Keychain, Windows Credential Locker
|
|
30
28
|
"PyYAML==6.0.1", # Used for config file format
|
|
31
29
|
"setproctitle==1.3.3", # Gives the chia processes readable names
|
|
32
30
|
"sortedcontainers==2.4.0", # For maintaining sorted mempools
|
|
@@@ -46,29 -44,28 +46,29 @@@ upnp_dependencies =
|
|
|
46
46
|
|
|
47
47
|
dev_dependencies = [
|
|
48
48
|
"build==1.0.3",
|
|
49
|
-
"coverage==7.
|
|
49
|
+
"coverage==7.4.1",
|
|
50
50
|
"diff-cover==8.0.1",
|
|
51
|
-
"pre-commit==3.5.0",
|
|
52
|
-
"
|
|
53
|
-
"
|
|
54
|
-
"
|
|
51
|
+
"pre-commit==3.5.0; python_version < '3.9'",
|
|
52
|
+
"pre-commit==3.6.0; python_version >= '3.9'",
|
|
53
|
+
"py3createtorrent==1.2.0",
|
|
54
|
+
"pylint==3.0.3",
|
|
55
|
+
"pytest==8.0.0",
|
|
55
56
|
"pytest-cov==4.1.0",
|
|
56
57
|
"pytest-mock==3.12.0",
|
|
57
|
-
"pytest-xdist==3.
|
|
58
|
+
"pytest-xdist==3.5.0",
|
|
58
59
|
"pyupgrade==3.15.0",
|
|
59
|
--
"twine==
|
|
60
|
-
"isort==5.
|
|
61
|
-
"flake8==
|
|
62
|
-
"mypy==1.
|
|
63
|
-
"black==23.
|
|
64
|
-
"lxml==
|
|
60
|
++
"twine==5.0.0",
|
|
61
|
+
"isort==5.13.2",
|
|
62
|
+
"flake8==7.0.0",
|
|
63
|
+
"mypy==1.8.0",
|
|
64
|
+
"black==23.12.1",
|
|
65
|
+
"lxml==5.1.0",
|
|
65
66
|
"aiohttp_cors==0.7.0", # For blackd
|
|
66
|
-
"pyinstaller==
|
|
67
|
-
"types-aiofiles==23.2.0.
|
|
67
|
+
"pyinstaller==6.3.0",
|
|
68
|
+
"types-aiofiles==23.2.0.20240106",
|
|
68
69
|
"types-cryptography==3.3.23.2",
|
|
69
70
|
"types-pyyaml==6.0.12.12",
|
|
70
|
-
"types-setuptools==
|
|
71
|
+
"types-setuptools==69.0.0.20240115",
|
|
71
72
|
]
|
|
72
73
|
|
|
73
74
|
legacy_keyring_dependencies = [
|
|
@@@ -6,7 -6,7 +6,8 @@@ from chia_rs import G2Elemen
|
|
|
6
6
|
from chia.clvm.spend_sim import sim_and_client
|
|
7
7
|
from chia.types.blockchain_format.program import Program
|
|
8
8
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
9
|
-
from chia.types.coin_spend import make_spend
|
|
10
|
-
from chia.types.
|
|
9
|
++
from chia.types.coin_spend import compute_additions, make_spend
|
|
10
|
++
from chia.types.condition_opcodes import ConditionOpcode
|
|
11
11
|
from chia.types.spend_bundle import SpendBundle
|
|
12
12
|
|
|
13
13
|
|
|
@@@ -45,6 -45,6 +46,54 @@@ async def test_all_endpoints()
|
|
|
45
46
|
for i in range(0, 5):
|
|
46
47
|
await sim.farm_block()
|
|
47
48
|
|
|
49
|
++
# get_coin_records_by_hint
|
|
50
|
++
acs = Program.to(1)
|
|
51
|
++
acs_ph = acs.get_tree_hash()
|
|
52
|
++
await sim.farm_block(acs_ph)
|
|
53
|
++
coin_records = await sim_client.get_coin_records_by_puzzle_hash(acs.get_tree_hash())
|
|
54
|
++
coin = coin_records[0].coin
|
|
55
|
++
hint = Program.to("hint").get_tree_hash()
|
|
56
|
++
non_existent_hint = Program.to("non_existent_hint").get_tree_hash()
|
|
57
|
++
acs_hint_spent = make_spend(
|
|
58
|
++
coin,
|
|
59
|
++
acs,
|
|
60
|
++
Program.to([[ConditionOpcode.CREATE_COIN, acs.get_tree_hash(), 2, [hint]]]),
|
|
61
|
++
)
|
|
62
|
++
hinted_coin = compute_additions(acs_hint_spent)[0]
|
|
63
|
++
acs_hint_unspent = make_spend(
|
|
64
|
++
hinted_coin,
|
|
65
|
++
acs,
|
|
66
|
++
Program.to([[ConditionOpcode.CREATE_COIN, acs.get_tree_hash(), 1, [hint]]]),
|
|
67
|
++
)
|
|
68
|
++
await sim_client.push_tx(SpendBundle([acs_hint_spent, acs_hint_unspent], G2Element()))
|
|
69
|
++
await sim.farm_block(acs_ph)
|
|
70
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, include_spent_coins=False)
|
|
71
|
++
assert len(coin_records) == 1
|
|
72
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, include_spent_coins=True)
|
|
73
|
++
assert len(coin_records) == 2
|
|
74
|
++
coin_records = await sim_client.get_coin_records_by_hint(non_existent_hint)
|
|
75
|
++
assert len(coin_records) == 0
|
|
76
|
++
coin_records = await sim_client.get_coin_records_by_puzzle_hash(acs.get_tree_hash())
|
|
77
|
++
next_coin = coin_records[-1].coin
|
|
78
|
++
height = sim.get_height()
|
|
79
|
++
acs_hint_next_coin = make_spend(
|
|
80
|
++
next_coin,
|
|
81
|
++
acs,
|
|
82
|
++
Program.to([[ConditionOpcode.CREATE_COIN, acs.get_tree_hash(), 2, [hint]]]),
|
|
83
|
++
)
|
|
84
|
++
await sim_client.push_tx(SpendBundle([acs_hint_next_coin], G2Element()))
|
|
85
|
++
await sim.farm_block(acs_ph)
|
|
86
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, start_height=height + 1, end_height=height + 2)
|
|
87
|
++
assert len(coin_records) == 1
|
|
88
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, start_height=height)
|
|
89
|
++
assert len(coin_records) == 3
|
|
90
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, end_height=height + 1)
|
|
91
|
++
assert len(coin_records) == 2
|
|
92
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, end_height=height + 1, include_spent_coins=False)
|
|
93
|
++
assert len(coin_records) == 1
|
|
94
|
++
coin_records = await sim_client.get_coin_records_by_hint(hint, start_height=height + 3)
|
|
95
|
++
assert len(coin_records) == 0
|
|
96
|
++
|
|
48
97
|
# get_coin_records_by_puzzle_hash
|
|
49
98
|
coin_records = await sim_client.get_coin_records_by_puzzle_hash(bytes32([0] * 32))
|
|
50
99
|
coin_record_name = coin_records[0].coin.name()
|
|
@@@ -89,7 -89,7 +138,7 @@@
|
|
|
89
138
|
# push_tx
|
|
90
139
|
puzzle_hash = bytes.fromhex("9dcf97a184f32623d11a73124ceb99a5709b083721e878a16d78f596718ba7b2") # Program.to(1)
|
|
91
140
|
await sim.farm_block(puzzle_hash)
|
|
92
|
--
spendable_coin = await sim_client.get_coin_records_by_puzzle_hash(puzzle_hash)
|
|
141
|
++
spendable_coin = await sim_client.get_coin_records_by_puzzle_hash(puzzle_hash, include_spent_coins=False)
|
|
93
142
|
spendable_coin = spendable_coin[0].coin
|
|
94
143
|
bundle = SpendBundle(
|
|
95
144
|
[
|
|
@@@ -102,7 -102,7 +151,7 @@@
|
|
|
102
151
|
G2Element(),
|
|
103
152
|
)
|
|
104
153
|
result, error = await sim_client.push_tx(bundle)
|
|
105
|
--
|
|
154
|
++
assert not error
|
|
106
155
|
# get_all_mempool_tx_ids
|
|
107
156
|
mempool_items = await sim_client.get_all_mempool_tx_ids()
|
|
108
157
|
assert len(mempool_items) == 1
|
|
@@@ -377,6 -377,6 +377,7 @@@ def create_service_and_wallet_client_ge
|
|
|
377
377
|
rpc_port: Optional[int] = None,
|
|
378
378
|
root_path: Optional[Path] = None,
|
|
379
379
|
consume_errors: bool = True,
|
|
380
|
++
use_ssl: bool = True,
|
|
380
381
|
) -> AsyncIterator[Tuple[_T_RpcClient, Dict[str, Any]]]:
|
|
381
382
|
if root_path is None:
|
|
382
383
|
root_path = default_root
|
|
@@@ -1,0 -1,0 +1,27 @@@
|
|
|
1
|
++
from __future__ import annotations
|
|
2
|
++
|
|
3
|
++
from pathlib import Path
|
|
4
|
++
|
|
5
|
++
import pytest
|
|
6
|
++
|
|
7
|
++
from chia.cmds.cmds_util import get_any_service_client
|
|
8
|
++
from chia.rpc.rpc_client import RpcClient
|
|
9
|
++
from tests.util.misc import RecordingWebServer
|
|
10
|
++
|
|
11
|
++
|
|
12
|
++
@pytest.mark.anyio
|
|
13
|
++
async def test_get_any_service_client_works_without_ssl(
|
|
14
|
++
root_path_populated_with_config: Path,
|
|
15
|
++
recording_web_server: RecordingWebServer,
|
|
16
|
++
) -> None:
|
|
17
|
++
expected_result = {"success": True, "keepy": "uppy"}
|
|
18
|
++
|
|
19
|
++
async with get_any_service_client(
|
|
20
|
++
client_type=RpcClient,
|
|
21
|
++
rpc_port=recording_web_server.web_server.listen_port,
|
|
22
|
++
root_path=root_path_populated_with_config,
|
|
23
|
++
use_ssl=False,
|
|
24
|
++
) as [rpc_client, _]:
|
|
25
|
++
result = await rpc_client.fetch(path="", request_json={"response": expected_result})
|
|
26
|
++
|
|
27
|
++
assert result == expected_result
|
|
@@@ -3,7 -3,6 +3,8 @@@ from __future__ import annotation
|
|
|
3
3
|
from pathlib import Path
|
|
4
4
|
from typing import Any, Dict, List, Optional, Tuple, Union, cast
|
|
5
5
|
|
|
6
|
+
import pkg_resources
|
|
7
|
++
import pytest
|
|
6
8
|
from chia_rs import Coin, G2Element
|
|
7
9
|
|
|
8
10
|
from chia.server.outbound_message import NodeType
|
|
@@@ -627,6 -626,6 +628,59 @@@ def test_add_token(capsys: object, get_
|
|
|
627
628
|
test_rpc_clients.wallet_rpc_client.check_log(expected_calls)
|
|
628
629
|
|
|
629
630
|
|
|
631
|
++
def test_make_offer_bad_filename(
|
|
632
|
++
capsys: object, get_test_cli_clients: Tuple[TestRpcClients, Path], tmp_path: Path
|
|
633
|
++
) -> None:
|
|
634
|
++
_, root_dir = get_test_cli_clients
|
|
635
|
++
|
|
636
|
++
request_cat_id = get_bytes32(2)
|
|
637
|
++
request_nft_id = get_bytes32(2)
|
|
638
|
++
request_nft_addr = encode_puzzle_hash(request_nft_id, "nft")
|
|
639
|
++
# we offer xch and a random cat via wallet id and request a random cat, nft via coin and tail
|
|
640
|
++
command_args_dir = [
|
|
641
|
++
"wallet",
|
|
642
|
++
"make_offer",
|
|
643
|
++
FINGERPRINT_ARG,
|
|
644
|
++
f"-p{str(tmp_path)}",
|
|
645
|
++
"--reuse",
|
|
646
|
++
"-m1",
|
|
647
|
++
"--offer",
|
|
648
|
++
"1:10",
|
|
649
|
++
"--offer",
|
|
650
|
++
"3:100",
|
|
651
|
++
"--request",
|
|
652
|
++
f"{request_cat_id.hex()}:10",
|
|
653
|
++
"--request",
|
|
654
|
++
f"{request_nft_addr}:1",
|
|
655
|
++
]
|
|
656
|
++
|
|
657
|
++
test_file: Path = tmp_path / "test.offer"
|
|
658
|
++
test_file.touch(mode=0o400)
|
|
659
|
++
|
|
660
|
++
command_args_unwritable = [
|
|
661
|
++
"wallet",
|
|
662
|
++
"make_offer",
|
|
663
|
++
FINGERPRINT_ARG,
|
|
664
|
++
f"-p{str(test_file)}",
|
|
665
|
++
"--reuse",
|
|
666
|
++
"-m1",
|
|
667
|
++
"--offer",
|
|
668
|
++
"1:10",
|
|
669
|
++
"--offer",
|
|
670
|
++
"3:100",
|
|
671
|
++
"--request",
|
|
672
|
++
f"{request_cat_id.hex()}:10",
|
|
673
|
++
"--request",
|
|
674
|
++
f"{request_nft_addr}:1",
|
|
675
|
++
]
|
|
676
|
++
|
|
677
|
++
with pytest.raises(AssertionError, match=r".*Invalid value for '-p' / '--filepath.*is a directory.*"):
|
|
678
|
++
run_cli_command_and_assert(capsys, root_dir, command_args_dir, [""])
|
|
679
|
++
|
|
680
|
++
with pytest.raises(AssertionError, match=r".*Invalid value for '-p' / '--filepath.*is not writable.*"):
|
|
681
|
++
run_cli_command_and_assert(capsys, root_dir, command_args_unwritable, [""])
|
|
682
|
++
|
|
683
|
++
|
|
630
684
|
def test_make_offer(capsys: object, get_test_cli_clients: Tuple[TestRpcClients, Path], tmp_path: Path) -> None:
|
|
631
685
|
test_rpc_clients, root_dir = get_test_cli_clients
|
|
632
686
|
|
|
@@@ -5,6 -5,6 +5,7 @@@ import asynci
|
|
|
5
5
|
import dataclasses
|
|
6
6
|
import datetime
|
|
7
7
|
import functools
|
|
8
|
++
import json
|
|
8
9
|
import math
|
|
9
10
|
import multiprocessing
|
|
10
11
|
import os
|
|
@@@ -20,11 -19,18 +21,13 @@@ import pytes
|
|
|
20
21
|
|
|
21
22
|
# TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469
|
|
22
23
|
from _pytest.fixtures import SubRequest
|
|
24
|
++
from pytest import MonkeyPatch
|
|
23
25
|
|
|
26
|
++
import tests
|
|
24
27
|
from chia.clvm.spend_sim import CostLogger
|
|
25
|
-
|
|
26
|
-
# Set spawn after stdlib imports, but before other imports
|
|
27
28
|
from chia.consensus.constants import ConsensusConstants
|
|
28
|
-
from chia.farmer.farmer import Farmer
|
|
29
|
-
from chia.farmer.farmer_api import FarmerAPI
|
|
30
29
|
from chia.full_node.full_node import FullNode
|
|
31
30
|
from chia.full_node.full_node_api import FullNodeAPI
|
|
32
|
-
from chia.harvester.harvester import Harvester
|
|
33
|
-
from chia.harvester.harvester_api import HarvesterAPI
|
|
34
|
-
from chia.protocols import full_node_protocol
|
|
35
31
|
from chia.rpc.farmer_rpc_client import FarmerRpcClient
|
|
36
32
|
from chia.rpc.harvester_rpc_client import HarvesterRpcClient
|
|
37
33
|
from chia.rpc.wallet_rpc_client import WalletRpcClient
|
|
@@@ -66,37 -61,20 +69,52 @@@ from chia.util.task_timing import main
|
|
|
66
69
|
from chia.util.task_timing import start_task_instrumentation, stop_task_instrumentation
|
|
67
70
|
from chia.wallet.wallet_node import WalletNode
|
|
68
71
|
from chia.wallet.wallet_node_api import WalletNodeAPI
|
|
72
|
++
from tests import ether
|
|
69
73
|
from tests.core.data_layer.util import ChiaRoot
|
|
70
74
|
from tests.core.node_height import node_height_at_least
|
|
71
75
|
from tests.simulation.test_simulation import test_constants_modified
|
|
72
|
-
from tests.util.misc import BenchmarkRunner, GcMode, RecordingWebServer, _AssertRuntime, measure_overhead
|
|
73
|
-
from tests.util.
|
|
76
|
++
from tests.util.misc import BenchmarkRunner, GcMode, RecordingWebServer, TestId, _AssertRuntime, measure_overhead
|
|
77
|
+
from tests.util.setup_nodes import (
|
|
78
|
+
OldSimulatorsAndWallets,
|
|
79
|
+
SimulatorsAndWallets,
|
|
80
|
+
setup_full_system,
|
|
81
|
+
setup_n_nodes,
|
|
82
|
+
setup_simulators_and_wallets,
|
|
83
|
+
setup_simulators_and_wallets_service,
|
|
84
|
+
setup_two_nodes,
|
|
85
|
+
)
|
|
74
86
|
from tests.util.time_out_assert import time_out_assert
|
|
75
87
|
|
|
76
88
|
multiprocessing.set_start_method("spawn")
|
|
77
89
|
|
|
78
90
|
from pathlib import Path
|
|
79
91
|
|
|
80
|
-
from chia.simulator.block_tools import BlockTools,
|
|
92
|
+
from chia.simulator.block_tools import BlockTools, create_block_tools_async, test_constants
|
|
81
93
|
from chia.simulator.keyring import TempKeyring
|
|
82
|
-
from chia.simulator.setup_nodes import setup_farmer_multi_harvester
|
|
83
94
|
from chia.util.keyring_wrapper import KeyringWrapper
|
|
95
|
+
from tests.util.setup_nodes import setup_farmer_multi_harvester
|
|
96
|
+
|
|
97
|
+
|
|
98
|
++
@pytest.fixture(name="ether_setup", autouse=True)
|
|
99
|
++
def ether_setup_fixture(request: SubRequest, record_property: Callable[[str, object], None]) -> Iterator[None]:
|
|
100
|
++
with MonkeyPatch.context() as monkeypatch_context:
|
|
101
|
++
monkeypatch_context.setattr(ether, "record_property", record_property)
|
|
102
|
++
monkeypatch_context.setattr(ether, "test_id", TestId.create(node=request.node))
|
|
103
|
++
yield
|
|
104
|
++
|
|
105
|
++
|
|
106
|
++
@pytest.fixture(autouse=True)
|
|
107
|
++
def ether_test_id_property_fixture(ether_setup: None, record_property: Callable[[str, object], None]) -> None:
|
|
108
|
++
assert ether.test_id is not None, "ether.test_id is None, did you forget to use the ether_setup fixture?"
|
|
109
|
++
record_property("test_id", json.dumps(ether.test_id.marshal(), ensure_ascii=True, sort_keys=True))
|
|
110
|
++
|
|
111
|
++
|
|
112
|
+
def make_old_setup_simulators_and_wallets(new: SimulatorsAndWallets) -> OldSimulatorsAndWallets:
|
|
113
|
+
return (
|
|
114
|
+
[simulator.peer_api for simulator in new.simulators],
|
|
115
|
+
[(wallet.node, wallet.peer_server) for wallet in new.wallets],
|
|
116
|
+
new.bt,
|
|
117
|
+
)
|
|
84
118
|
|
|
85
119
|
|
|
86
120
|
@pytest.fixture(scope="session")
|
|
@@@ -131,16 -109,16 +149,12 @@@ def benchmark_runner_overhead_fixture(
|
|
|
131
149
|
|
|
132
150
|
@pytest.fixture(name="benchmark_runner")
|
|
133
151
|
def benchmark_runner_fixture(
|
|
134
|
--
request: SubRequest,
|
|
135
152
|
benchmark_runner_overhead: float,
|
|
136
|
--
record_property: Callable[[str, object], None],
|
|
137
153
|
benchmark_repeat: int,
|
|
138
154
|
) -> BenchmarkRunner:
|
|
139
|
--
label = request.node.name
|
|
140
155
|
return BenchmarkRunner(
|
|
141
|
--
|
|
156
|
++
test_id=ether.test_id,
|
|
142
157
|
overhead=benchmark_runner_overhead,
|
|
143
|
--
record_property=record_property,
|
|
144
158
|
)
|
|
145
159
|
|
|
146
160
|
|
|
@@@ -434,6 -418,6 +448,13 @@@ def pytest_addoption(parser: pytest.Par
|
|
|
434
448
|
type=int,
|
|
435
449
|
help=f"The number of times to run each benchmark, default {default_repeats}.",
|
|
436
450
|
)
|
|
451
|
++
group.addoption(
|
|
452
|
++
"--time-out-assert-repeats",
|
|
453
|
++
action="store",
|
|
454
|
++
default=default_repeats,
|
|
455
|
++
type=int,
|
|
456
|
++
help=f"The number of times to run each test with time out asserts, default {default_repeats}.",
|
|
457
|
++
)
|
|
437
458
|
|
|
438
459
|
|
|
439
460
|
def pytest_configure(config):
|
|
@@@ -459,6 -443,6 +480,22 @@@
|
|
|
459
480
|
|
|
460
481
|
globals()[benchmark_repeat_fixture.__name__] = benchmark_repeat_fixture
|
|
461
482
|
|
|
483
|
++
time_out_assert_repeats = config.getoption("--time-out-assert-repeats")
|
|
484
|
++
if time_out_assert_repeats != 1:
|
|
485
|
++
|
|
486
|
++
@pytest.fixture(
|
|
487
|
++
name="time_out_assert_repeat",
|
|
488
|
++
autouse=True,
|
|
489
|
++
params=[
|
|
490
|
++
pytest.param(repeat, id=f"time_out_assert_repeat{repeat:03d}")
|
|
491
|
++
for repeat in range(time_out_assert_repeats)
|
|
492
|
++
],
|
|
493
|
++
)
|
|
494
|
++
def time_out_assert_repeat_fixture(request: SubRequest) -> int:
|
|
495
|
++
return request.param
|
|
496
|
++
|
|
497
|
++
globals()[time_out_assert_repeat_fixture.__name__] = time_out_assert_repeat_fixture
|
|
498
|
++
|
|
462
499
|
|
|
463
500
|
def pytest_collection_modifyitems(session, config: pytest.Config, items: List[pytest.Function]):
|
|
464
501
|
# https://github.com/pytest-dev/pytest/issues/3730#issuecomment-567142496
|
|
@@@ -1,56 -1,0 +1,56 @@@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
import pytest
|
|
6
|
+
|
|
7
|
+
from chia.consensus.block_creation import compute_block_cost, compute_block_fee
|
|
8
|
+
from chia.consensus.condition_costs import ConditionCost
|
|
9
|
+
from chia.consensus.default_constants import DEFAULT_CONSTANTS
|
|
10
|
+
from chia.types.blockchain_format.coin import Coin
|
|
11
|
+
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
1
|
-
from chia.util.ints import uint32
|
|
12
|
++
from chia.util.ints import uint32, uint64
|
|
13
|
+
from tests.core.make_block_generator import make_block_generator
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@pytest.mark.parametrize("add_amount", [[0], [1, 2, 3], []])
|
|
17
|
+
@pytest.mark.parametrize("rem_amount", [[0], [1, 2, 3], []])
|
|
18
|
+
def test_compute_block_fee(add_amount: List[int], rem_amount: List[int]) -> None:
|
|
2
|
-
additions: List[Coin] = [Coin(bytes32.random(), bytes32.random(), amt) for amt in add_amount]
|
|
3
|
-
removals: List[Coin] = [Coin(bytes32.random(), bytes32.random(), amt) for amt in rem_amount]
|
|
19
|
++
additions: List[Coin] = [Coin(bytes32.random(), bytes32.random(), uint64(amt)) for amt in add_amount]
|
|
20
|
++
removals: List[Coin] = [Coin(bytes32.random(), bytes32.random(), uint64(amt)) for amt in rem_amount]
|
|
21
|
+
|
|
22
|
+
# the fee is the left-overs from the removals (spent) coins after deducting
|
|
23
|
+
# the newly created coins (additions)
|
|
24
|
+
expected = sum(rem_amount) - sum(add_amount)
|
|
25
|
+
|
|
26
|
+
if expected < 0:
|
|
27
|
+
with pytest.raises(ValueError, match="does not fit into uint64"):
|
|
28
|
+
compute_block_fee(additions, removals)
|
|
29
|
+
else:
|
|
30
|
+
assert compute_block_fee(additions, removals) == expected
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def test_compute_block_cost(softfork_height: uint32) -> None:
|
|
34
|
+
num_coins = 10
|
|
35
|
+
generator = make_block_generator(num_coins)
|
|
36
|
+
cost = int(compute_block_cost(generator, DEFAULT_CONSTANTS, softfork_height))
|
|
37
|
+
|
|
38
|
+
coin_cost = ConditionCost.CREATE_COIN.value * num_coins
|
|
39
|
+
agg_sig_cost = ConditionCost.AGG_SIG.value * num_coins
|
|
40
|
+
|
|
41
|
+
cost -= coin_cost
|
|
42
|
+
cost -= agg_sig_cost
|
|
43
|
+
cost -= len(bytes(generator.program)) * DEFAULT_CONSTANTS.COST_PER_BYTE
|
|
44
|
+
|
|
45
|
+
print(f"{cost=}")
|
|
46
|
+
|
|
47
|
+
# the cost is a non-trivial combination of the CLVM cost of running the puzzles
|
|
48
|
+
# and before the hard-fork, combined with the cost of running the generator ROM
|
|
49
|
+
# Consensus requires these costs to be unchanged over time, so this test
|
|
50
|
+
# ensures compatibility
|
|
51
|
+
if softfork_height >= DEFAULT_CONSTANTS.HARD_FORK_HEIGHT:
|
|
52
|
+
expected = 180980
|
|
53
|
+
else:
|
|
54
|
+
expected = 3936699
|
|
55
|
+
|
|
56
|
+
assert cost == expected
|
|
@@@ -78,32 -78,32 +78,32 @@@ def test_construction() -> None
|
|
|
78
78
|
|
|
79
79
|
with pytest.raises(OverflowError, match="int too big to convert"):
|
|
80
80
|
# overflow
|
|
81
|
--
Coin(H1, H2, 0x10000000000000000)
|
|
81
|
++
Coin(H1, H2, 0x10000000000000000) # type: ignore[arg-type]
|
|
82
82
|
|
|
83
83
|
with pytest.raises(OverflowError, match="can't convert negative int to unsigned"):
|
|
84
84
|
# overflow
|
|
85
|
--
Coin(H1, H2, -1)
|
|
85
|
++
Coin(H1, H2, -1) # type: ignore[arg-type]
|
|
86
86
|
|
|
87
87
|
H1_short = b"a" * 31
|
|
88
88
|
H1_long = b"a" * 33
|
|
89
89
|
|
|
90
|
--
with pytest.raises(ValueError):
|
|
90
|
++
with pytest.raises(ValueError, match="could not convert slice to array"):
|
|
91
91
|
# short hash
|
|
92
|
--
Coin(H1_short, H2, 1)
|
|
92
|
++
Coin(H1_short, H2, uint64(1))
|
|
93
93
|
|
|
94
|
--
with pytest.raises(ValueError):
|
|
94
|
++
with pytest.raises(ValueError, match="could not convert slice to array"):
|
|
95
95
|
# long hash
|
|
96
|
--
Coin(H1_long, H2, 1)
|
|
96
|
++
Coin(H1_long, H2, uint64(1))
|
|
97
97
|
|
|
98
|
--
with pytest.raises(ValueError):
|
|
98
|
++
with pytest.raises(ValueError, match="could not convert slice to array"):
|
|
99
99
|
# short hash
|
|
100
|
--
Coin(H2, H1_short, 1)
|
|
100
|
++
Coin(H2, H1_short, uint64(1))
|
|
101
101
|
|
|
102
|
--
with pytest.raises(ValueError):
|
|
102
|
++
with pytest.raises(ValueError, match="could not convert slice to array"):
|
|
103
103
|
# long hash
|
|
104
|
--
Coin(H2, H1_long, 1)
|
|
104
|
++
Coin(H2, H1_long, uint64(1))
|
|
105
105
|
|
|
106
|
--
c = Coin(H1, H2, 1000)
|
|
106
|
++
c = Coin(H1, H2, uint64(1000))
|
|
107
107
|
assert c.parent_coin_info == H1
|
|
108
108
|
assert c.puzzle_hash == H2
|
|
109
109
|
assert c.amount == 1000
|
|
@@@ -14,6 -15,6 +14,7 @@@ from chia.types.coin_spend import CoinS
|
|
|
14
14
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
15
15
|
from chia.types.spend_bundle import SpendBundle
|
|
16
16
|
from chia.util.errors import ValidationError
|
|
17
|
++
from chia.util.ints import uint64
|
|
17
18
|
|
|
18
19
|
BLANK_SPEND_BUNDLE = SpendBundle(coin_spends=[], aggregated_signature=G2Element())
|
|
19
20
|
NULL_SIGNATURE = "0xc" + "0" * 191
|
|
@@@ -59,10 -104,10 +60,10 @@@ def create_spends(num: int) -> Tuple[Li
|
|
|
59
60
|
for i in range(num):
|
|
60
61
|
target_ph = rand_hash(rng)
|
|
61
62
|
conditions = [[ConditionOpcode.CREATE_COIN, target_ph, 1]]
|
|
62
|
--
coin = Coin(rand_hash(rng), puzzle_hash, 1000)
|
|
63
|
--
new_coin = Coin(coin.name(), target_ph, 1)
|
|
63
|
++
coin = Coin(rand_hash(rng), puzzle_hash, uint64(1000))
|
|
64
|
++
new_coin = Coin(coin.name(), target_ph, uint64(1))
|
|
64
65
|
create_coin.append(new_coin)
|
|
65
|
-
spends.append(
|
|
66
|
+
spends.append(make_spend(coin, puzzle, Program.to(conditions)))
|
|
66
67
|
|
|
67
68
|
return spends, create_coin
|
|
68
69
|
|
|
@@@ -9,6 -9,6 +9,7 @@@ import o
|
|
|
9
9
|
import random
|
|
10
10
|
import sys
|
|
11
11
|
import time
|
|
12
|
++
from copy import deepcopy
|
|
12
13
|
from dataclasses import dataclass
|
|
13
14
|
from pathlib import Path
|
|
14
15
|
from typing import Any, AsyncIterator, Dict, List, Optional, Tuple, cast
|
|
@@@ -16,11 -16,12 +17,27 @@@
|
|
|
16
17
|
import anyio
|
|
17
18
|
import pytest
|
|
18
19
|
|
|
19
|
--
from chia.cmds.data_funcs import
|
|
20
|
++
from chia.cmds.data_funcs import (
|
|
21
|
++
clear_pending_roots,
|
|
22
|
++
get_keys_cmd,
|
|
23
|
++
get_keys_values_cmd,
|
|
24
|
++
get_kv_diff_cmd,
|
|
25
|
++
get_proof_cmd,
|
|
26
|
++
verify_proof_cmd,
|
|
27
|
++
wallet_log_in_cmd,
|
|
28
|
++
)
|
|
20
29
|
from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
|
|
21
30
|
from chia.data_layer.data_layer import DataLayer
|
|
22
|
-
from chia.data_layer.
|
|
23
|
--
from chia.data_layer.
|
|
24
|
--
|
|
31
|
++
from chia.data_layer.data_layer_errors import KeyNotFoundError, OfferIntegrityError
|
|
32
|
++
from chia.data_layer.data_layer_util import (
|
|
33
|
++
HashOnlyProof,
|
|
34
|
++
OfferStore,
|
|
35
|
++
ProofLayer,
|
|
36
|
++
Status,
|
|
37
|
++
StoreProofs,
|
|
38
|
++
key_hash,
|
|
39
|
++
leaf_hash,
|
|
40
|
++
)
|
|
25
41
|
from chia.data_layer.data_layer_wallet import DataLayerWallet, verify_offer
|
|
26
42
|
from chia.data_layer.download_data import get_delta_filename, get_full_tree_filename
|
|
27
43
|
from chia.rpc.data_layer_rpc_api import DataLayerRpcApi
|
|
@@@ -35,7 -37,7 +52,8 @@@ from chia.types.blockchain_format.sized
|
|
|
35
52
|
from chia.types.peer_info import PeerInfo
|
|
36
53
|
from chia.util.byte_types import hexstr_to_bytes
|
|
37
54
|
from chia.util.config import save_config
|
|
38
|
--
from chia.util.
|
|
55
|
++
from chia.util.hash import std_hash
|
|
56
|
++
from chia.util.ints import uint8, uint16, uint32, uint64
|
|
39
57
|
from chia.util.keychain import bytes_to_mnemonic
|
|
40
58
|
from chia.util.timing import adjusted_timeout, backoff_times
|
|
41
59
|
from chia.wallet.trading.offer import Offer as TradingOffer
|
|
@@@ -425,6 -398,6 +443,11 @@@ async def test_keys_values_ancestors
|
|
|
425
443
|
assert len(pairs_before["keys_values"]) == len(keys_before["keys"]) == 5
|
|
426
444
|
assert len(pairs_after["keys_values"]) == len(keys_after["keys"]) == 7
|
|
427
445
|
|
|
446
|
++
with pytest.raises(Exception, match="Can't find keys"):
|
|
447
|
++
await data_rpc_api.get_keys({"id": store_id.hex(), "root_hash": bytes32([0] * 31 + [1]).hex()})
|
|
448
|
++
with pytest.raises(Exception, match="Can't find keys and values"):
|
|
449
|
++
await data_rpc_api.get_keys_values({"id": store_id.hex(), "root_hash": bytes32([0] * 31 + [1]).hex()})
|
|
450
|
++
|
|
428
451
|
|
|
429
452
|
@pytest.mark.anyio
|
|
430
453
|
async def test_get_roots(
|
|
@@@ -759,6 -723,6 +782,7 @@@ class StoreSetup
|
|
|
759
782
|
id: bytes32
|
|
760
783
|
original_hash: bytes32
|
|
761
784
|
data_layer: DataLayer
|
|
785
|
++
data_rpc_client: DataLayerRpcClient
|
|
762
786
|
|
|
763
787
|
|
|
764
788
|
@dataclass(frozen=True)
|
|
@@@ -792,12 -756,12 +816,20 @@@ async def offer_setup_fixture
|
|
|
792
816
|
for wallet_service in wallet_services:
|
|
793
817
|
assert wallet_service.rpc_server is not None
|
|
794
818
|
port = wallet_service.rpc_server.listen_port
|
|
795
|
--
|
|
796
|
--
|
|
819
|
++
data_layer_service = await exit_stack.enter_async_context(
|
|
820
|
++
init_data_layer_service(
|
|
797
821
|
wallet_rpc_port=port, wallet_service=wallet_service, bt=bt, db_path=tmp_path.joinpath(str(port))
|
|
798
822
|
)
|
|
799
823
|
)
|
|
824
|
++
data_layer = data_layer_service._api.data_layer
|
|
800
825
|
data_rpc_api = DataLayerRpcApi(data_layer)
|
|
826
|
++
assert data_layer_service.rpc_server is not None
|
|
827
|
++
data_rpc_client = await DataLayerRpcClient.create(
|
|
828
|
++
self_hostname,
|
|
829
|
++
port=data_layer_service.rpc_server.listen_port,
|
|
830
|
++
root_path=bt.root_path,
|
|
831
|
++
net_config=bt.config,
|
|
832
|
++
)
|
|
801
833
|
|
|
802
834
|
create_response = await data_rpc_api.create_data_store({"verbose": True})
|
|
803
835
|
await full_node_api.process_transaction_records(records=create_response["txs"], timeout=60)
|
|
@@@ -808,6 -772,6 +840,7 @@@
|
|
|
808
840
|
id=bytes32.from_hexstr(create_response["id"]),
|
|
809
841
|
original_hash=bytes32([0] * 32),
|
|
810
842
|
data_layer=data_layer,
|
|
843
|
++
data_rpc_client=data_rpc_client,
|
|
811
844
|
)
|
|
812
845
|
)
|
|
813
846
|
|
|
@@@ -847,16 -811,16 +880,23 @@@
|
|
|
847
880
|
id=maker.id,
|
|
848
881
|
original_hash=maker_original_root_hash,
|
|
849
882
|
data_layer=maker.data_layer,
|
|
883
|
++
data_rpc_client=maker.data_rpc_client,
|
|
850
884
|
),
|
|
851
885
|
taker=StoreSetup(
|
|
852
886
|
api=taker.api,
|
|
853
887
|
id=taker.id,
|
|
854
888
|
original_hash=taker_original_root_hash,
|
|
855
889
|
data_layer=taker.data_layer,
|
|
890
|
++
data_rpc_client=taker.data_rpc_client,
|
|
856
891
|
),
|
|
857
892
|
full_node_api=full_node_api,
|
|
858
893
|
)
|
|
859
894
|
|
|
895
|
++
maker.data_rpc_client.close()
|
|
896
|
++
await maker.data_rpc_client.await_closed()
|
|
897
|
++
taker.data_rpc_client.close()
|
|
898
|
++
await taker.data_rpc_client.await_closed()
|
|
899
|
++
|
|
860
900
|
|
|
861
901
|
async def populate_offer_setup(offer_setup: OfferSetup, count: int) -> OfferSetup:
|
|
862
902
|
if count > 0:
|
|
@@@ -906,12 -870,12 +946,14 @@@
|
|
|
906
946
|
id=offer_setup.maker.id,
|
|
907
947
|
original_hash=maker_original_root_hash,
|
|
908
948
|
data_layer=offer_setup.maker.data_layer,
|
|
949
|
++
data_rpc_client=offer_setup.maker.data_rpc_client,
|
|
909
950
|
),
|
|
910
951
|
taker=StoreSetup(
|
|
911
952
|
api=offer_setup.taker.api,
|
|
912
953
|
id=offer_setup.taker.id,
|
|
913
954
|
original_hash=taker_original_root_hash,
|
|
914
955
|
data_layer=offer_setup.taker.data_layer,
|
|
956
|
++
data_rpc_client=offer_setup.taker.data_rpc_client,
|
|
915
957
|
),
|
|
916
958
|
full_node_api=offer_setup.full_node_api,
|
|
917
959
|
)
|
|
@@@ -2292,39 -2252,3 +2334,823 @@@ async def test_wallet_log_in_changes_ac
|
|
|
2292
2334
|
|
|
2293
2335
|
active_fingerprint = cast(int, (await wallet_rpc_api.get_logged_in_fingerprint(request={}))["fingerprint"])
|
|
2294
2336
|
assert active_fingerprint == secondary_fingerprint
|
|
2337
|
+
|
|
2338
|
+
|
|
2339
|
+
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2340
|
+
@pytest.mark.anyio
|
|
2341
|
+
async def test_mirrors(
|
|
2342
|
+
self_hostname: str, one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
|
|
2343
|
+
) -> None:
|
|
2344
|
+
wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
|
|
2345
|
+
self_hostname, one_wallet_and_one_simulator_services
|
|
2346
|
+
)
|
|
2347
|
+
async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
|
|
2348
|
+
data_rpc_api = DataLayerRpcApi(data_layer)
|
|
2349
|
+
res = await data_rpc_api.create_data_store({})
|
|
2350
|
+
assert res is not None
|
|
2351
|
+
store_id = bytes32(hexstr_to_bytes(res["id"]))
|
|
2352
|
+
await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
|
|
2353
|
+
|
|
2354
|
+
urls = ["http://127.0.0.1/8000", "http://127.0.0.1/8001"]
|
|
2355
|
+
res = await data_rpc_api.add_mirror({"id": store_id.hex(), "urls": urls, "amount": 1, "fee": 1})
|
|
2356
|
+
|
|
2357
|
+
await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
|
|
2358
|
+
mirrors = await data_rpc_api.get_mirrors({"id": store_id.hex()})
|
|
2359
|
+
mirror_list = mirrors["mirrors"]
|
|
2360
|
+
assert len(mirror_list) == 1
|
|
2361
|
+
mirror = mirror_list[0]
|
|
2362
|
+
assert mirror["urls"] == ["http://127.0.0.1/8000", "http://127.0.0.1/8001"]
|
|
2363
|
+
coin_id = mirror["coin_id"]
|
|
2364
|
+
|
|
2365
|
+
res = await data_rpc_api.delete_mirror({"coin_id": coin_id, "fee": 1})
|
|
2366
|
+
await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
|
|
2367
|
+
mirrors = await data_rpc_api.get_mirrors({"id": store_id.hex()})
|
|
2368
|
+
mirror_list = mirrors["mirrors"]
|
|
2369
|
+
assert len(mirror_list) == 0
|
|
2370
|
+
|
|
2371
|
+
with pytest.raises(RuntimeError, match="URL list can't be empty"):
|
|
2372
|
+
res = await data_rpc_api.add_mirror({"id": store_id.hex(), "urls": [], "amount": 1, "fee": 1})
|
|
2373
|
++
|
|
2374
|
++
|
|
2375
|
++
@dataclass(frozen=True)
|
|
2376
|
++
class ProofReference:
|
|
2377
|
++
entries_to_insert: int
|
|
2378
|
++
keys_to_prove: List[str]
|
|
2379
|
++
verify_proof_response: Dict[str, Any]
|
|
2380
|
++
|
|
2381
|
++
|
|
2382
|
++
def populate_reference(count: int, keys_to_prove: int) -> ProofReference:
|
|
2383
|
++
ret = ProofReference(
|
|
2384
|
++
entries_to_insert=count,
|
|
2385
|
++
keys_to_prove=[value.to_bytes(length=1, byteorder="big").hex() for value in range(keys_to_prove)],
|
|
2386
|
++
verify_proof_response={
|
|
2387
|
++
"current_root": True,
|
|
2388
|
++
"success": True,
|
|
2389
|
++
"verified_clvm_hashes": {
|
|
2390
|
++
"store_id": "",
|
|
2391
|
++
"inclusions": [
|
|
2392
|
++
{
|
|
2393
|
++
"key_clvm_hash": "0x" + std_hash(b"\1" + value.to_bytes(length=1, byteorder="big")).hex(),
|
|
2394
|
++
"value_clvm_hash": "0x"
|
|
2395
|
++
+ std_hash(b"\1" + b"\x01" + value.to_bytes(length=1, byteorder="big")).hex(),
|
|
2396
|
++
}
|
|
2397
|
++
for value in range(keys_to_prove)
|
|
2398
|
++
],
|
|
2399
|
++
},
|
|
2400
|
++
},
|
|
2401
|
++
)
|
|
2402
|
++
return ret
|
|
2403
|
++
|
|
2404
|
++
|
|
2405
|
++
async def populate_proof_setup(offer_setup: OfferSetup, count: int) -> OfferSetup:
|
|
2406
|
++
if count > 0:
|
|
2407
|
++
# Only need data in the maker for proofs
|
|
2408
|
++
value_prefix = b"\x01"
|
|
2409
|
++
store_setup = offer_setup.maker
|
|
2410
|
++
await store_setup.api.batch_update(
|
|
2411
|
++
{
|
|
2412
|
++
"id": store_setup.id.hex(),
|
|
2413
|
++
"changelist": [
|
|
2414
|
++
{
|
|
2415
|
++
"action": "insert",
|
|
2416
|
++
"key": value.to_bytes(length=1, byteorder="big").hex(),
|
|
2417
|
++
"value": (value_prefix + value.to_bytes(length=1, byteorder="big")).hex(),
|
|
2418
|
++
}
|
|
2419
|
++
for value in range(count)
|
|
2420
|
++
],
|
|
2421
|
++
}
|
|
2422
|
++
)
|
|
2423
|
++
|
|
2424
|
++
await process_for_data_layer_keys(
|
|
2425
|
++
expected_key=b"\x00",
|
|
2426
|
++
full_node_api=offer_setup.full_node_api,
|
|
2427
|
++
data_layer=offer_setup.maker.data_layer,
|
|
2428
|
++
store_id=offer_setup.maker.id,
|
|
2429
|
++
)
|
|
2430
|
++
|
|
2431
|
++
maker_original_singleton = await offer_setup.maker.data_layer.get_root(store_id=offer_setup.maker.id)
|
|
2432
|
++
assert maker_original_singleton is not None
|
|
2433
|
++
maker_original_root_hash = maker_original_singleton.root
|
|
2434
|
++
|
|
2435
|
++
return OfferSetup(
|
|
2436
|
++
maker=StoreSetup(
|
|
2437
|
++
api=offer_setup.maker.api,
|
|
2438
|
++
id=offer_setup.maker.id,
|
|
2439
|
++
original_hash=maker_original_root_hash,
|
|
2440
|
++
data_layer=offer_setup.maker.data_layer,
|
|
2441
|
++
data_rpc_client=offer_setup.maker.data_rpc_client,
|
|
2442
|
++
),
|
|
2443
|
++
taker=StoreSetup(
|
|
2444
|
++
api=offer_setup.taker.api,
|
|
2445
|
++
id=offer_setup.taker.id,
|
|
2446
|
++
original_hash=bytes32([0] * 32),
|
|
2447
|
++
data_layer=offer_setup.taker.data_layer,
|
|
2448
|
++
data_rpc_client=offer_setup.taker.data_rpc_client,
|
|
2449
|
++
),
|
|
2450
|
++
full_node_api=offer_setup.full_node_api,
|
|
2451
|
++
)
|
|
2452
|
++
|
|
2453
|
++
|
|
2454
|
++
@pytest.mark.parametrize(
|
|
2455
|
++
argnames="reference",
|
|
2456
|
++
argvalues=[
|
|
2457
|
++
pytest.param(populate_reference(count=5, keys_to_prove=1), id="one key"),
|
|
2458
|
++
pytest.param(populate_reference(count=5, keys_to_prove=2), id="two keys"),
|
|
2459
|
++
pytest.param(populate_reference(count=5, keys_to_prove=5), id="five keys"),
|
|
2460
|
++
],
|
|
2461
|
++
)
|
|
2462
|
++
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2463
|
++
@pytest.mark.anyio
|
|
2464
|
++
async def test_dl_proof(offer_setup: OfferSetup, reference: ProofReference) -> None:
|
|
2465
|
++
offer_setup = await populate_proof_setup(offer_setup=offer_setup, count=reference.entries_to_insert)
|
|
2466
|
++
reference.verify_proof_response["verified_clvm_hashes"]["store_id"] = f"0x{offer_setup.maker.id.hex()}"
|
|
2467
|
++
|
|
2468
|
++
#
|
|
2469
|
++
# Ideally this would use the InterfaceLayer as a parameterized list, however, all the fixtures
|
|
2470
|
++
# are function scoped, which makes it very long to run this but this doesn't change any of the
|
|
2471
|
++
# data, so rerunning all the setup for each test is not needed - module scope would be perfect
|
|
2472
|
++
# but it requires all the supporting fixtures (wallet/nodes/etc) to have the same scope
|
|
2473
|
++
#
|
|
2474
|
++
|
|
2475
|
++
# random tests for HashOnlyProof root()
|
|
2476
|
++
fakeproof = HashOnlyProof(
|
|
2477
|
++
key_clvm_hash=bytes32([1] * 32), value_clvm_hash=bytes32([1] * 32), node_hash=bytes32([3] * 32), layers=[]
|
|
2478
|
++
)
|
|
2479
|
++
assert fakeproof.root() == fakeproof.node_hash
|
|
2480
|
++
|
|
2481
|
++
fakeproof = HashOnlyProof(
|
|
2482
|
++
key_clvm_hash=bytes32([1] * 32),
|
|
2483
|
++
value_clvm_hash=bytes32([1] * 32),
|
|
2484
|
++
node_hash=bytes32([3] * 32),
|
|
2485
|
++
layers=[
|
|
2486
|
++
ProofLayer(other_hash_side=uint8(0), other_hash=bytes32([1] * 32), combined_hash=bytes32([5] * 32)),
|
|
2487
|
++
ProofLayer(other_hash_side=uint8(0), other_hash=bytes32([1] * 32), combined_hash=bytes32([7] * 32)),
|
|
2488
|
++
],
|
|
2489
|
++
)
|
|
2490
|
++
assert fakeproof.root() == bytes32([7] * 32)
|
|
2491
|
++
|
|
2492
|
++
# Test InterfaceLayer.direct
|
|
2493
|
++
proof = await offer_setup.maker.api.get_proof(
|
|
2494
|
++
request={"store_id": offer_setup.maker.id.hex(), "keys": reference.keys_to_prove}
|
|
2495
|
++
)
|
|
2496
|
++
assert proof["success"] is True
|
|
2497
|
++
verify = await offer_setup.taker.api.verify_proof(request=proof["proof"])
|
|
2498
|
++
assert verify == reference.verify_proof_response
|
|
2499
|
++
|
|
2500
|
++
# test InterfaceLayer.client
|
|
2501
|
++
proof = dict()
|
|
2502
|
++
verify = dict()
|
|
2503
|
++
proof = await offer_setup.maker.data_rpc_client.get_proof(
|
|
2504
|
++
store_id=offer_setup.maker.id, keys=[hexstr_to_bytes(key) for key in reference.keys_to_prove]
|
|
2505
|
++
)
|
|
2506
|
++
assert proof["success"] is True
|
|
2507
|
++
verify = await offer_setup.taker.data_rpc_client.verify_proof(proof=proof["proof"])
|
|
2508
|
++
assert verify == reference.verify_proof_response
|
|
2509
|
++
|
|
2510
|
++
# test InterfaceLayer.func
|
|
2511
|
++
proof = dict()
|
|
2512
|
++
verify = dict()
|
|
2513
|
++
proof = await get_proof_cmd(
|
|
2514
|
++
store_id=offer_setup.maker.id,
|
|
2515
|
++
key_strings=reference.keys_to_prove,
|
|
2516
|
++
rpc_port=offer_setup.maker.data_rpc_client.port,
|
|
2517
|
++
root_path=offer_setup.maker.data_layer.root_path,
|
|
2518
|
++
)
|
|
2519
|
++
assert proof["success"] is True
|
|
2520
|
++
verify = await verify_proof_cmd(
|
|
2521
|
++
proof=proof["proof"],
|
|
2522
|
++
rpc_port=offer_setup.taker.data_rpc_client.port,
|
|
2523
|
++
root_path=offer_setup.taker.data_layer.root_path,
|
|
2524
|
++
)
|
|
2525
|
++
assert verify == reference.verify_proof_response
|
|
2526
|
++
|
|
2527
|
++
# test InterfaceLayer.cli
|
|
2528
|
++
key_args: List[str] = []
|
|
2529
|
++
for key in reference.keys_to_prove:
|
|
2530
|
++
key_args.append("--key")
|
|
2531
|
++
key_args.append(key)
|
|
2532
|
++
|
|
2533
|
++
process = await run_cli_cmd(
|
|
2534
|
++
"data",
|
|
2535
|
++
"get_proof",
|
|
2536
|
++
"--id",
|
|
2537
|
++
offer_setup.maker.id.hex(),
|
|
2538
|
++
*key_args,
|
|
2539
|
++
"--data-rpc-port",
|
|
2540
|
++
str(offer_setup.maker.data_rpc_client.port),
|
|
2541
|
++
root_path=offer_setup.maker.data_layer.root_path,
|
|
2542
|
++
)
|
|
2543
|
++
assert process.stdout is not None
|
|
2544
|
++
raw_output = await process.stdout.read()
|
|
2545
|
++
proof = json.loads(raw_output)
|
|
2546
|
++
assert proof["success"] is True
|
|
2547
|
++
|
|
2548
|
++
process = await run_cli_cmd(
|
|
2549
|
++
"data",
|
|
2550
|
++
"verify_proof",
|
|
2551
|
++
"-p",
|
|
2552
|
++
json.dumps(proof["proof"]),
|
|
2553
|
++
"--data-rpc-port",
|
|
2554
|
++
str(offer_setup.taker.data_rpc_client.port),
|
|
2555
|
++
root_path=offer_setup.taker.data_layer.root_path,
|
|
2556
|
++
)
|
|
2557
|
++
assert process.stdout is not None
|
|
2558
|
++
raw_output = await process.stdout.read()
|
|
2559
|
++
verify = json.loads(raw_output)
|
|
2560
|
++
assert verify == reference.verify_proof_response
|
|
2561
|
++
|
|
2562
|
++
|
|
2563
|
++
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2564
|
++
@pytest.mark.anyio
|
|
2565
|
++
async def test_dl_proof_errors(
|
|
2566
|
++
self_hostname: str, one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
|
|
2567
|
++
) -> None:
|
|
2568
|
++
wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
|
|
2569
|
++
self_hostname, one_wallet_and_one_simulator_services
|
|
2570
|
++
)
|
|
2571
|
++
async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
|
|
2572
|
++
data_rpc_api = DataLayerRpcApi(data_layer)
|
|
2573
|
++
fakeroot = bytes32([4] * 32)
|
|
2574
|
++
res = await data_rpc_api.create_data_store({})
|
|
2575
|
++
assert res is not None
|
|
2576
|
++
store_id = bytes32(hexstr_to_bytes(res["id"]))
|
|
2577
|
++
await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
|
|
2578
|
++
|
|
2579
|
++
with pytest.raises(ValueError, match="no root"):
|
|
2580
|
++
await data_rpc_api.get_proof(request={"store_id": fakeroot.hex(), "keys": []})
|
|
2581
|
++
|
|
2582
|
++
with pytest.raises(KeyNotFoundError, match="Key not found"):
|
|
2583
|
++
await data_rpc_api.get_proof(request={"store_id": store_id.hex(), "keys": [b"4".hex()]})
|
|
2584
|
++
|
|
2585
|
++
|
|
2586
|
++
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2587
|
++
@pytest.mark.anyio
|
|
2588
|
++
async def test_dl_proof_verify_errors(offer_setup: OfferSetup, seeded_random: random.Random) -> None:
|
|
2589
|
++
two_key_proof = populate_reference(count=5, keys_to_prove=2)
|
|
2590
|
++
offer_setup = await populate_proof_setup(offer_setup=offer_setup, count=two_key_proof.entries_to_insert)
|
|
2591
|
++
two_key_proof.verify_proof_response["verified_clvm_hashes"]["store_id"] = f"0x{offer_setup.maker.id.hex()}"
|
|
2592
|
++
|
|
2593
|
++
proof = await offer_setup.maker.api.get_proof(
|
|
2594
|
++
request={"store_id": offer_setup.maker.id.hex(), "keys": two_key_proof.keys_to_prove}
|
|
2595
|
++
)
|
|
2596
|
++
assert proof["success"] is True
|
|
2597
|
++
|
|
2598
|
++
verify = await offer_setup.taker.api.verify_proof(request=proof["proof"])
|
|
2599
|
++
assert verify == two_key_proof.verify_proof_response
|
|
2600
|
++
|
|
2601
|
++
# test bad coin id
|
|
2602
|
++
badproof = deepcopy(proof["proof"])
|
|
2603
|
++
badproof["coin_id"] = bytes32.random(seeded_random).hex()
|
|
2604
|
++
with pytest.raises(ValueError, match="Invalid Proof: No DL singleton found at coin id"):
|
|
2605
|
++
await offer_setup.taker.api.verify_proof(request=badproof)
|
|
2606
|
++
|
|
2607
|
++
# test bad innerpuz
|
|
2608
|
++
badproof = deepcopy(proof["proof"])
|
|
2609
|
++
badproof["inner_puzzle_hash"] = bytes32.random(seeded_random).hex()
|
|
2610
|
++
with pytest.raises(ValueError, match="Invalid Proof: incorrect puzzle hash"):
|
|
2611
|
++
await offer_setup.taker.api.verify_proof(request=badproof)
|
|
2612
|
++
|
|
2613
|
++
# test bad key
|
|
2614
|
++
badproof = deepcopy(proof["proof"])
|
|
2615
|
++
badproof["store_proofs"]["proofs"][0]["key_clvm_hash"] = bytes32.random(seeded_random).hex()
|
|
2616
|
++
with pytest.raises(ValueError, match="Invalid Proof: node hash does not match key and value"):
|
|
2617
|
++
await offer_setup.taker.api.verify_proof(request=badproof)
|
|
2618
|
++
|
|
2619
|
++
# test bad value
|
|
2620
|
++
badproof = deepcopy(proof["proof"])
|
|
2621
|
++
badproof["store_proofs"]["proofs"][0]["value_clvm_hash"] = bytes32.random(seeded_random).hex()
|
|
2622
|
++
with pytest.raises(ValueError, match="Invalid Proof: node hash does not match key and value"):
|
|
2623
|
++
await offer_setup.taker.api.verify_proof(request=badproof)
|
|
2624
|
++
|
|
2625
|
++
# test bad layer hash
|
|
2626
|
++
badproof = deepcopy(proof["proof"])
|
|
2627
|
++
badproof["store_proofs"]["proofs"][0]["layers"][1]["other_hash"] = bytes32.random(seeded_random).hex()
|
|
2628
|
++
with pytest.raises(ValueError, match="Invalid Proof: invalid proof of inclusion found"):
|
|
2629
|
++
await offer_setup.taker.api.verify_proof(request=badproof)
|
|
2630
|
++
|
|
2631
|
++
|
|
2632
|
++
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2633
|
++
@pytest.mark.anyio
|
|
2634
|
++
async def test_dl_proof_changed_root(offer_setup: OfferSetup, seeded_random: random.Random) -> None:
|
|
2635
|
++
two_key_proof = populate_reference(count=5, keys_to_prove=2)
|
|
2636
|
++
offer_setup = await populate_proof_setup(offer_setup=offer_setup, count=two_key_proof.entries_to_insert)
|
|
2637
|
++
two_key_proof.verify_proof_response["verified_clvm_hashes"]["store_id"] = f"0x{offer_setup.maker.id.hex()}"
|
|
2638
|
++
|
|
2639
|
++
proof = await offer_setup.maker.api.get_proof(
|
|
2640
|
++
request={"store_id": offer_setup.maker.id.hex(), "keys": two_key_proof.keys_to_prove}
|
|
2641
|
++
)
|
|
2642
|
++
assert proof["success"] is True
|
|
2643
|
++
|
|
2644
|
++
verify = await offer_setup.taker.api.verify_proof(request=proof["proof"])
|
|
2645
|
++
assert verify == two_key_proof.verify_proof_response
|
|
2646
|
++
|
|
2647
|
++
key = b"a"
|
|
2648
|
++
value = b"\x00\x01"
|
|
2649
|
++
changelist: List[Dict[str, str]] = [{"action": "insert", "key": key.hex(), "value": value.hex()}]
|
|
2650
|
++
await offer_setup.maker.api.batch_update({"id": offer_setup.maker.id.hex(), "changelist": changelist})
|
|
2651
|
++
|
|
2652
|
++
await process_for_data_layer_keys(
|
|
2653
|
++
expected_key=key,
|
|
2654
|
++
expected_value=value,
|
|
2655
|
++
full_node_api=offer_setup.full_node_api,
|
|
2656
|
++
data_layer=offer_setup.maker.data_layer,
|
|
2657
|
++
store_id=offer_setup.maker.id,
|
|
2658
|
++
)
|
|
2659
|
++
|
|
2660
|
++
root_changed = await offer_setup.taker.api.verify_proof(request=proof["proof"])
|
|
2661
|
++
assert root_changed == {**verify, "current_root": False}
|
|
2662
|
++
|
|
2663
|
++
|
|
2664
|
++
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2665
|
++
@pytest.mark.anyio
|
|
2666
|
++
async def test_pagination_rpcs(
|
|
2667
|
++
self_hostname: str, one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
|
|
2668
|
++
) -> None:
|
|
2669
|
++
wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
|
|
2670
|
++
self_hostname, one_wallet_and_one_simulator_services
|
|
2671
|
++
)
|
|
2672
|
++
# TODO: with this being a pseudo context manager'ish thing it doesn't actually handle shutdown
|
|
2673
|
++
async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
|
|
2674
|
++
data_rpc_api = DataLayerRpcApi(data_layer)
|
|
2675
|
++
res = await data_rpc_api.create_data_store({})
|
|
2676
|
++
assert res is not None
|
|
2677
|
++
store_id = bytes32(hexstr_to_bytes(res["id"]))
|
|
2678
|
++
await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
|
|
2679
|
++
key1 = b"aa"
|
|
2680
|
++
value1 = b"\x01\x02"
|
|
2681
|
++
key1_hash = key_hash(key1)
|
|
2682
|
++
leaf_hash1 = leaf_hash(key1, value1)
|
|
2683
|
++
changelist: List[Dict[str, str]] = [{"action": "insert", "key": key1.hex(), "value": value1.hex()}]
|
|
2684
|
++
key2 = b"ba"
|
|
2685
|
++
value2 = b"\x03\x02"
|
|
2686
|
++
key2_hash = key_hash(key2)
|
|
2687
|
++
leaf_hash2 = leaf_hash(key2, value2)
|
|
2688
|
++
changelist.append({"action": "insert", "key": key2.hex(), "value": value2.hex()})
|
|
2689
|
++
key3 = b"ccc"
|
|
2690
|
++
value3 = b"\x04\x05"
|
|
2691
|
++
changelist.append({"action": "insert", "key": key3.hex(), "value": value3.hex()})
|
|
2692
|
++
leaf_hash3 = leaf_hash(key3, value3)
|
|
2693
|
++
key4 = b"d"
|
|
2694
|
++
value4 = b"\x06\x03"
|
|
2695
|
++
key4_hash = key_hash(key4)
|
|
2696
|
++
leaf_hash4 = leaf_hash(key4, value4)
|
|
2697
|
++
changelist.append({"action": "insert", "key": key4.hex(), "value": value4.hex()})
|
|
2698
|
++
key5 = b"e"
|
|
2699
|
++
value5 = b"\x07\x01"
|
|
2700
|
++
key5_hash = key_hash(key5)
|
|
2701
|
++
leaf_hash5 = leaf_hash(key5, value5)
|
|
2702
|
++
changelist.append({"action": "insert", "key": key5.hex(), "value": value5.hex()})
|
|
2703
|
++
res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
|
|
2704
|
++
update_tx_rec0 = res["tx_id"]
|
|
2705
|
++
await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
|
|
2706
|
++
local_root = await data_rpc_api.get_local_root({"id": store_id.hex()})
|
|
2707
|
++
|
|
2708
|
++
keys_reference = {
|
|
2709
|
++
"total_pages": 2,
|
|
2710
|
++
"total_bytes": 9,
|
|
2711
|
++
"keys": [],
|
|
2712
|
++
"root_hash": local_root["hash"],
|
|
2713
|
++
}
|
|
2714
|
++
|
|
2715
|
++
keys_paginated = await data_rpc_api.get_keys({"id": store_id.hex(), "page": 0, "max_page_size": 5})
|
|
2716
|
++
assert key2_hash < key1_hash
|
|
2717
|
++
assert keys_paginated == {**keys_reference, "keys": ["0x" + key3.hex(), "0x" + key2.hex()]}
|
|
2718
|
++
|
|
2719
|
++
keys_paginated = await data_rpc_api.get_keys({"id": store_id.hex(), "page": 1, "max_page_size": 5})
|
|
2720
|
++
assert key5_hash < key4_hash
|
|
2721
|
++
assert keys_paginated == {**keys_reference, "keys": ["0x" + key1.hex(), "0x" + key5.hex(), "0x" + key4.hex()]}
|
|
2722
|
++
|
|
2723
|
++
keys_paginated = await data_rpc_api.get_keys({"id": store_id.hex(), "page": 2, "max_page_size": 5})
|
|
2724
|
++
assert keys_paginated == keys_reference
|
|
2725
|
++
|
|
2726
|
++
keys_values_reference = {
|
|
2727
|
++
"total_pages": 3,
|
|
2728
|
++
"total_bytes": 19,
|
|
2729
|
++
"keys_values": [],
|
|
2730
|
++
"root_hash": local_root["hash"],
|
|
2731
|
++
}
|
|
2732
|
++
keys_values_paginated = await data_rpc_api.get_keys_values(
|
|
2733
|
++
{"id": store_id.hex(), "page": 0, "max_page_size": 8},
|
|
2734
|
++
)
|
|
2735
|
++
expected_kv = [
|
|
2736
|
++
{"atom": None, "hash": "0x" + leaf_hash3.hex(), "key": "0x" + key3.hex(), "value": "0x" + value3.hex()},
|
|
2737
|
++
]
|
|
2738
|
++
assert keys_values_paginated == {**keys_values_reference, "keys_values": expected_kv}
|
|
2739
|
++
|
|
2740
|
++
keys_values_paginated = await data_rpc_api.get_keys_values(
|
|
2741
|
++
{"id": store_id.hex(), "page": 1, "max_page_size": 8}
|
|
2742
|
++
)
|
|
2743
|
++
expected_kv = [
|
|
2744
|
++
{"atom": None, "hash": "0x" + leaf_hash1.hex(), "key": "0x" + key1.hex(), "value": "0x" + value1.hex()},
|
|
2745
|
++
{"atom": None, "hash": "0x" + leaf_hash2.hex(), "key": "0x" + key2.hex(), "value": "0x" + value2.hex()},
|
|
2746
|
++
]
|
|
2747
|
++
assert leaf_hash1 < leaf_hash2
|
|
2748
|
++
assert keys_values_paginated == {**keys_values_reference, "keys_values": expected_kv}
|
|
2749
|
++
|
|
2750
|
++
keys_values_paginated = await data_rpc_api.get_keys_values(
|
|
2751
|
++
{"id": store_id.hex(), "page": 2, "max_page_size": 8}
|
|
2752
|
++
)
|
|
2753
|
++
expected_kv = [
|
|
2754
|
++
{"atom": None, "hash": "0x" + leaf_hash5.hex(), "key": "0x" + key5.hex(), "value": "0x" + value5.hex()},
|
|
2755
|
++
{"atom": None, "hash": "0x" + leaf_hash4.hex(), "key": "0x" + key4.hex(), "value": "0x" + value4.hex()},
|
|
2756
|
++
]
|
|
2757
|
++
assert leaf_hash5 < leaf_hash4
|
|
2758
|
++
assert keys_values_paginated == {**keys_values_reference, "keys_values": expected_kv}
|
|
2759
|
++
|
|
2760
|
++
keys_values_paginated = await data_rpc_api.get_keys_values(
|
|
2761
|
++
{"id": store_id.hex(), "page": 3, "max_page_size": 8}
|
|
2762
|
++
)
|
|
2763
|
++
assert keys_values_paginated == keys_values_reference
|
|
2764
|
++
|
|
2765
|
++
key6 = b"ab"
|
|
2766
|
++
value6 = b"\x01\x01"
|
|
2767
|
++
leaf_hash6 = leaf_hash(key6, value6)
|
|
2768
|
++
key7 = b"ac"
|
|
2769
|
++
value7 = b"\x01\x01"
|
|
2770
|
++
leaf_hash7 = leaf_hash(key7, value7)
|
|
2771
|
++
|
|
2772
|
++
changelist = [{"action": "delete", "key": key3.hex()}]
|
|
2773
|
++
changelist.append({"action": "insert", "key": key6.hex(), "value": value6.hex()})
|
|
2774
|
++
changelist.append({"action": "insert", "key": key7.hex(), "value": value7.hex()})
|
|
2775
|
++
|
|
2776
|
++
res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
|
|
2777
|
++
update_tx_rec1 = res["tx_id"]
|
|
2778
|
++
await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
|
|
2779
|
++
|
|
2780
|
++
history = await data_rpc_api.get_root_history({"id": store_id.hex()})
|
|
2781
|
++
hash1 = history["root_history"][1]["root_hash"]
|
|
2782
|
++
hash2 = history["root_history"][2]["root_hash"]
|
|
2783
|
++
diff_reference = {
|
|
2784
|
++
"total_pages": 3,
|
|
2785
|
++
"total_bytes": 13,
|
|
2786
|
++
"diff": [],
|
|
2787
|
++
}
|
|
2788
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2789
|
++
{
|
|
2790
|
++
"id": store_id.hex(),
|
|
2791
|
++
"hash_1": hash1.hex(),
|
|
2792
|
++
"hash_2": hash2.hex(),
|
|
2793
|
++
"page": 0,
|
|
2794
|
++
"max_page_size": 5,
|
|
2795
|
++
}
|
|
2796
|
++
)
|
|
2797
|
++
expected_diff = [{"type": "DELETE", "key": key3.hex(), "value": value3.hex()}]
|
|
2798
|
++
assert diff_res == {**diff_reference, "diff": expected_diff}
|
|
2799
|
++
|
|
2800
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2801
|
++
{
|
|
2802
|
++
"id": store_id.hex(),
|
|
2803
|
++
"hash_1": hash1.hex(),
|
|
2804
|
++
"hash_2": hash2.hex(),
|
|
2805
|
++
"page": 1,
|
|
2806
|
++
"max_page_size": 5,
|
|
2807
|
++
}
|
|
2808
|
++
)
|
|
2809
|
++
assert leaf_hash6 < leaf_hash7
|
|
2810
|
++
expected_diff = [{"type": "INSERT", "key": key6.hex(), "value": value6.hex()}]
|
|
2811
|
++
assert diff_res == {**diff_reference, "diff": expected_diff}
|
|
2812
|
++
|
|
2813
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2814
|
++
{
|
|
2815
|
++
"id": store_id.hex(),
|
|
2816
|
++
"hash_1": hash1.hex(),
|
|
2817
|
++
"hash_2": hash2.hex(),
|
|
2818
|
++
"page": 2,
|
|
2819
|
++
"max_page_size": 5,
|
|
2820
|
++
}
|
|
2821
|
++
)
|
|
2822
|
++
expected_diff = [{"type": "INSERT", "key": key7.hex(), "value": value7.hex()}]
|
|
2823
|
++
assert diff_res == {**diff_reference, "diff": expected_diff}
|
|
2824
|
++
|
|
2825
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2826
|
++
{
|
|
2827
|
++
"id": store_id.hex(),
|
|
2828
|
++
"hash_1": hash1.hex(),
|
|
2829
|
++
"hash_2": hash2.hex(),
|
|
2830
|
++
"page": 3,
|
|
2831
|
++
"max_page_size": 5,
|
|
2832
|
++
}
|
|
2833
|
++
)
|
|
2834
|
++
assert diff_res == diff_reference
|
|
2835
|
++
|
|
2836
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2837
|
++
{
|
|
2838
|
++
"id": store_id.hex(),
|
|
2839
|
++
"hash_1": hash1.hex(),
|
|
2840
|
++
"hash_2": bytes32([0] * 31 + [1]).hex(),
|
|
2841
|
++
"page": 0,
|
|
2842
|
++
"max_page_size": 10,
|
|
2843
|
++
}
|
|
2844
|
++
)
|
|
2845
|
++
empty_diff_reference = {
|
|
2846
|
++
"total_pages": 1,
|
|
2847
|
++
"total_bytes": 0,
|
|
2848
|
++
"diff": [],
|
|
2849
|
++
}
|
|
2850
|
++
assert diff_res == empty_diff_reference
|
|
2851
|
++
|
|
2852
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2853
|
++
{
|
|
2854
|
++
"id": store_id.hex(),
|
|
2855
|
++
"hash_1": bytes32([0] * 31 + [1]).hex(),
|
|
2856
|
++
"hash_2": hash2.hex(),
|
|
2857
|
++
"page": 0,
|
|
2858
|
++
"max_page_size": 10,
|
|
2859
|
++
}
|
|
2860
|
++
)
|
|
2861
|
++
assert diff_res == empty_diff_reference
|
|
2862
|
++
|
|
2863
|
++
new_value = b"\x02\x02"
|
|
2864
|
++
changelist = [{"action": "upsert", "key": key6.hex(), "value": new_value.hex()}]
|
|
2865
|
++
new_leaf_hash = leaf_hash(key6, new_value)
|
|
2866
|
++
res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
|
|
2867
|
++
update_tx_rec3 = res["tx_id"]
|
|
2868
|
++
await farm_block_with_spend(full_node_api, ph, update_tx_rec3, wallet_rpc_api)
|
|
2869
|
++
|
|
2870
|
++
history = await data_rpc_api.get_root_history({"id": store_id.hex()})
|
|
2871
|
++
hash1 = history["root_history"][2]["root_hash"]
|
|
2872
|
++
hash2 = history["root_history"][3]["root_hash"]
|
|
2873
|
++
|
|
2874
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2875
|
++
{
|
|
2876
|
++
"id": store_id.hex(),
|
|
2877
|
++
"hash_1": hash1.hex(),
|
|
2878
|
++
"hash_2": hash2.hex(),
|
|
2879
|
++
"page": 0,
|
|
2880
|
++
"max_page_size": 100,
|
|
2881
|
++
}
|
|
2882
|
++
)
|
|
2883
|
++
assert leaf_hash6 < new_leaf_hash
|
|
2884
|
++
diff_reference = {
|
|
2885
|
++
"total_pages": 1,
|
|
2886
|
++
"total_bytes": 8,
|
|
2887
|
++
"diff": [
|
|
2888
|
++
{"type": "DELETE", "key": key6.hex(), "value": value6.hex()},
|
|
2889
|
++
{"type": "INSERT", "key": key6.hex(), "value": new_value.hex()},
|
|
2890
|
++
],
|
|
2891
|
++
}
|
|
2892
|
++
assert diff_res == diff_reference
|
|
2893
|
++
|
|
2894
|
++
with pytest.raises(Exception, match="Can't find keys"):
|
|
2895
|
++
await data_rpc_api.get_keys(
|
|
2896
|
++
{"id": store_id.hex(), "page": 0, "max_page_size": 100, "root_hash": bytes32([0] * 31 + [1]).hex()}
|
|
2897
|
++
)
|
|
2898
|
++
|
|
2899
|
++
with pytest.raises(Exception, match="Can't find keys and values"):
|
|
2900
|
++
await data_rpc_api.get_keys_values(
|
|
2901
|
++
{"id": store_id.hex(), "page": 0, "max_page_size": 100, "root_hash": bytes32([0] * 31 + [1]).hex()}
|
|
2902
|
++
)
|
|
2903
|
++
|
|
2904
|
++
with pytest.raises(RuntimeError, match="Cannot paginate data, item size is larger than max page size"):
|
|
2905
|
++
keys_paginated = await data_rpc_api.get_keys_values({"id": store_id.hex(), "page": 0, "max_page_size": 1})
|
|
2906
|
++
|
|
2907
|
++
with pytest.raises(RuntimeError, match="Cannot paginate data, item size is larger than max page size"):
|
|
2908
|
++
keys_values_paginated = await data_rpc_api.get_keys_values(
|
|
2909
|
++
{"id": store_id.hex(), "page": 0, "max_page_size": 1}
|
|
2910
|
++
)
|
|
2911
|
++
|
|
2912
|
++
with pytest.raises(RuntimeError, match="Cannot paginate data, item size is larger than max page size"):
|
|
2913
|
++
diff_res = await data_rpc_api.get_kv_diff(
|
|
2914
|
++
{
|
|
2915
|
++
"id": store_id.hex(),
|
|
2916
|
++
"hash_1": hash1.hex(),
|
|
2917
|
++
"hash_2": hash2.hex(),
|
|
2918
|
++
"page": 0,
|
|
2919
|
++
"max_page_size": 1,
|
|
2920
|
++
}
|
|
2921
|
++
)
|
|
2922
|
++
|
|
2923
|
++
|
|
2924
|
++
@pytest.mark.limit_consensus_modes(reason="does not depend on consensus rules")
|
|
2925
|
++
@pytest.mark.parametrize(argnames="layer", argvalues=[InterfaceLayer.funcs, InterfaceLayer.cli, InterfaceLayer.client])
|
|
2926
|
++
@pytest.mark.parametrize(argnames="max_page_size", argvalues=[5, 100, None])
|
|
2927
|
++
@pytest.mark.anyio
|
|
2928
|
++
async def test_pagination_cmds(
|
|
2929
|
++
self_hostname: str,
|
|
2930
|
++
one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices,
|
|
2931
|
++
tmp_path: Path,
|
|
2932
|
++
layer: InterfaceLayer,
|
|
2933
|
++
max_page_size: Optional[int],
|
|
2934
|
++
bt: BlockTools,
|
|
2935
|
++
) -> None:
|
|
2936
|
++
wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
|
|
2937
|
++
self_hostname, one_wallet_and_one_simulator_services
|
|
2938
|
++
)
|
|
2939
|
++
async with init_data_layer_service(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer_service:
|
|
2940
|
++
assert data_layer_service.rpc_server is not None
|
|
2941
|
++
rpc_port = data_layer_service.rpc_server.listen_port
|
|
2942
|
++
data_layer = data_layer_service._api.data_layer
|
|
2943
|
++
data_rpc_api = DataLayerRpcApi(data_layer)
|
|
2944
|
++
|
|
2945
|
++
res = await data_rpc_api.create_data_store({})
|
|
2946
|
++
assert res is not None
|
|
2947
|
++
store_id = bytes32(hexstr_to_bytes(res["id"]))
|
|
2948
|
++
await farm_block_check_singleton(data_layer, full_node_api, ph, store_id, wallet=wallet_rpc_api.service)
|
|
2949
|
++
|
|
2950
|
++
key = b"aa"
|
|
2951
|
++
value = b"aa"
|
|
2952
|
++
key_2 = b"aaaa"
|
|
2953
|
++
value_2 = b"a"
|
|
2954
|
++
|
|
2955
|
++
changelist = [
|
|
2956
|
++
{"action": "insert", "key": key.hex(), "value": value.hex()},
|
|
2957
|
++
{"action": "insert", "key": key_2.hex(), "value": value_2.hex()},
|
|
2958
|
++
]
|
|
2959
|
++
|
|
2960
|
++
res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
|
|
2961
|
++
update_tx_rec0 = res["tx_id"]
|
|
2962
|
++
await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
|
|
2963
|
++
local_root = await data_rpc_api.get_local_root({"id": store_id.hex()})
|
|
2964
|
++
hash_1 = bytes32([0] * 32)
|
|
2965
|
++
hash_2 = local_root["hash"]
|
|
2966
|
++
# `InterfaceLayer.direct` is not tested here since test `test_pagination_rpcs` extensively use it.
|
|
2967
|
++
if layer == InterfaceLayer.funcs:
|
|
2968
|
++
keys = await get_keys_cmd(
|
|
2969
|
++
rpc_port=rpc_port,
|
|
2970
|
++
store_id="0x" + store_id.hex(),
|
|
2971
|
++
root_hash=None,
|
|
2972
|
++
fingerprint=None,
|
|
2973
|
++
page=0,
|
|
2974
|
++
max_page_size=max_page_size,
|
|
2975
|
++
root_path=bt.root_path,
|
|
2976
|
++
)
|
|
2977
|
++
keys_values = await get_keys_values_cmd(
|
|
2978
|
++
rpc_port=rpc_port,
|
|
2979
|
++
store_id="0x" + store_id.hex(),
|
|
2980
|
++
root_hash=None,
|
|
2981
|
++
fingerprint=None,
|
|
2982
|
++
page=0,
|
|
2983
|
++
max_page_size=max_page_size,
|
|
2984
|
++
root_path=bt.root_path,
|
|
2985
|
++
)
|
|
2986
|
++
kv_diff = await get_kv_diff_cmd(
|
|
2987
|
++
rpc_port=rpc_port,
|
|
2988
|
++
store_id="0x" + store_id.hex(),
|
|
2989
|
++
hash_1="0x" + hash_1.hex(),
|
|
2990
|
++
hash_2="0x" + hash_2.hex(),
|
|
2991
|
++
fingerprint=None,
|
|
2992
|
++
page=0,
|
|
2993
|
++
max_page_size=max_page_size,
|
|
2994
|
++
root_path=bt.root_path,
|
|
2995
|
++
)
|
|
2996
|
++
elif layer == InterfaceLayer.cli:
|
|
2997
|
++
for command in ("get_keys", "get_keys_values", "get_kv_diff"):
|
|
2998
|
++
if command == "get_keys" or command == "get_keys_values":
|
|
2999
|
++
args: List[str] = [
|
|
3000
|
++
sys.executable,
|
|
3001
|
++
"-m",
|
|
3002
|
++
"chia",
|
|
3003
|
++
"data",
|
|
3004
|
++
command,
|
|
3005
|
++
"--id",
|
|
3006
|
++
store_id.hex(),
|
|
3007
|
++
"--data-rpc-port",
|
|
3008
|
++
str(rpc_port),
|
|
3009
|
++
"--page",
|
|
3010
|
++
"0",
|
|
3011
|
++
]
|
|
3012
|
++
else:
|
|
3013
|
++
args = [
|
|
3014
|
++
sys.executable,
|
|
3015
|
++
"-m",
|
|
3016
|
++
"chia",
|
|
3017
|
++
"data",
|
|
3018
|
++
command,
|
|
3019
|
++
"--id",
|
|
3020
|
++
store_id.hex(),
|
|
3021
|
++
"--hash_1",
|
|
3022
|
++
"0x" + hash_1.hex(),
|
|
3023
|
++
"--hash_2",
|
|
3024
|
++
"0x" + hash_2.hex(),
|
|
3025
|
++
"--data-rpc-port",
|
|
3026
|
++
str(rpc_port),
|
|
3027
|
++
"--page",
|
|
3028
|
++
"0",
|
|
3029
|
++
]
|
|
3030
|
++
if max_page_size is not None:
|
|
3031
|
++
args.append("--max-page-size")
|
|
3032
|
++
args.append(f"{max_page_size}")
|
|
3033
|
++
process = await asyncio.create_subprocess_exec(
|
|
3034
|
++
*args,
|
|
3035
|
++
env={**os.environ, "CHIA_ROOT": str(bt.root_path)},
|
|
3036
|
++
stdout=asyncio.subprocess.PIPE,
|
|
3037
|
++
stderr=asyncio.subprocess.PIPE,
|
|
3038
|
++
)
|
|
3039
|
++
await process.wait()
|
|
3040
|
++
assert process.stdout is not None
|
|
3041
|
++
assert process.stderr is not None
|
|
3042
|
++
stdout = await process.stdout.read()
|
|
3043
|
++
stderr = await process.stderr.read()
|
|
3044
|
++
if command == "get_keys":
|
|
3045
|
++
keys = json.loads(stdout)
|
|
3046
|
++
elif command == "get_keys_values":
|
|
3047
|
++
keys_values = json.loads(stdout)
|
|
3048
|
++
else:
|
|
3049
|
++
kv_diff = json.loads(stdout)
|
|
3050
|
++
assert process.returncode == 0
|
|
3051
|
++
if sys.version_info >= (3, 10, 6):
|
|
3052
|
++
assert stderr == b""
|
|
3053
|
++
else: # pragma: no cover
|
|
3054
|
++
# https://github.com/python/cpython/issues/92841
|
|
3055
|
++
assert stderr == b"" or b"_ProactorBasePipeTransport.__del__" in stderr
|
|
3056
|
++
elif layer == InterfaceLayer.client:
|
|
3057
|
++
client = await DataLayerRpcClient.create(
|
|
3058
|
++
self_hostname=self_hostname,
|
|
3059
|
++
port=rpc_port,
|
|
3060
|
++
root_path=bt.root_path,
|
|
3061
|
++
net_config=bt.config,
|
|
3062
|
++
)
|
|
3063
|
++
try:
|
|
3064
|
++
keys = await client.get_keys(
|
|
3065
|
++
store_id=store_id,
|
|
3066
|
++
root_hash=None,
|
|
3067
|
++
page=0,
|
|
3068
|
++
max_page_size=max_page_size,
|
|
3069
|
++
)
|
|
3070
|
++
keys_values = await client.get_keys_values(
|
|
3071
|
++
store_id=store_id,
|
|
3072
|
++
root_hash=None,
|
|
3073
|
++
page=0,
|
|
3074
|
++
max_page_size=max_page_size,
|
|
3075
|
++
)
|
|
3076
|
++
kv_diff = await client.get_kv_diff(
|
|
3077
|
++
store_id=store_id,
|
|
3078
|
++
hash_1=hash_1,
|
|
3079
|
++
hash_2=hash_2,
|
|
3080
|
++
page=0,
|
|
3081
|
++
max_page_size=max_page_size,
|
|
3082
|
++
)
|
|
3083
|
++
finally:
|
|
3084
|
++
client.close()
|
|
3085
|
++
await client.await_closed()
|
|
3086
|
++
else: # pragma: no cover
|
|
3087
|
++
assert False, "unhandled parametrization"
|
|
3088
|
++
if max_page_size is None or max_page_size == 100:
|
|
3089
|
++
assert keys == {
|
|
3090
|
++
"keys": ["0x61616161", "0x6161"],
|
|
3091
|
++
"root_hash": "0x3f4ae7b8e10ef48b3114843537d5def989ee0a3b6568af7e720a71730f260fa1",
|
|
3092
|
++
"success": True,
|
|
3093
|
++
"total_bytes": 6,
|
|
3094
|
++
"total_pages": 1,
|
|
3095
|
++
}
|
|
3096
|
++
assert keys_values == {
|
|
3097
|
++
"keys_values": [
|
|
3098
|
++
{
|
|
3099
|
++
"atom": None,
|
|
3100
|
++
"hash": "0x3c8ecfd41a1c54820f5ad687a4cbfbad0faa78445cbf31ec4f879ce553216a9d",
|
|
3101
|
++
"key": "0x61616161",
|
|
3102
|
++
"value": "0x61",
|
|
3103
|
++
},
|
|
3104
|
++
{
|
|
3105
|
++
"atom": None,
|
|
3106
|
++
"hash": "0x5a7edd8e4bc28e32ba2a2514054f3872037a4f6da52c5a662969b6b881beaa3f",
|
|
3107
|
++
"key": "0x6161",
|
|
3108
|
++
"value": "0x6161",
|
|
3109
|
++
},
|
|
3110
|
++
],
|
|
3111
|
++
"root_hash": "0x3f4ae7b8e10ef48b3114843537d5def989ee0a3b6568af7e720a71730f260fa1",
|
|
3112
|
++
"success": True,
|
|
3113
|
++
"total_bytes": 9,
|
|
3114
|
++
"total_pages": 1,
|
|
3115
|
++
}
|
|
3116
|
++
assert kv_diff == {
|
|
3117
|
++
"diff": [
|
|
3118
|
++
{"key": "61616161", "type": "INSERT", "value": "61"},
|
|
3119
|
++
{"key": "6161", "type": "INSERT", "value": "6161"},
|
|
3120
|
++
],
|
|
3121
|
++
"success": True,
|
|
3122
|
++
"total_bytes": 9,
|
|
3123
|
++
"total_pages": 1,
|
|
3124
|
++
}
|
|
3125
|
++
elif max_page_size == 5:
|
|
3126
|
++
assert keys == {
|
|
3127
|
++
"keys": ["0x61616161"],
|
|
3128
|
++
"root_hash": "0x3f4ae7b8e10ef48b3114843537d5def989ee0a3b6568af7e720a71730f260fa1",
|
|
3129
|
++
"success": True,
|
|
3130
|
++
"total_bytes": 6,
|
|
3131
|
++
"total_pages": 2,
|
|
3132
|
++
}
|
|
3133
|
++
assert keys_values == {
|
|
3134
|
++
"keys_values": [
|
|
3135
|
++
{
|
|
3136
|
++
"atom": None,
|
|
3137
|
++
"hash": "0x3c8ecfd41a1c54820f5ad687a4cbfbad0faa78445cbf31ec4f879ce553216a9d",
|
|
3138
|
++
"key": "0x61616161",
|
|
3139
|
++
"value": "0x61",
|
|
3140
|
++
}
|
|
3141
|
++
],
|
|
3142
|
++
"root_hash": "0x3f4ae7b8e10ef48b3114843537d5def989ee0a3b6568af7e720a71730f260fa1",
|
|
3143
|
++
"success": True,
|
|
3144
|
++
"total_bytes": 9,
|
|
3145
|
++
"total_pages": 2,
|
|
3146
|
++
}
|
|
3147
|
++
assert kv_diff == {
|
|
3148
|
++
"diff": [
|
|
3149
|
++
{"key": "61616161", "type": "INSERT", "value": "61"},
|
|
3150
|
++
],
|
|
3151
|
++
"success": True,
|
|
3152
|
++
"total_bytes": 9,
|
|
3153
|
++
"total_pages": 2,
|
|
3154
|
++
}
|
|
3155
|
++
else: # pragma: no cover
|
|
3156
|
++
assert False, "unhandled parametrization"
|
|
@@@ -308,7 -314,7 +308,8 @@@ async def test_get_ancestors_optimized(
|
|
|
308
308
|
if i > 25 and i <= 200 and random.randint(0, 4):
|
|
309
309
|
is_insert = True
|
|
310
310
|
if i > 200:
|
|
311
|
-
|
|
312
|
-
hint_keys_values =
|
|
311
|
++
kv_compressed = await data_store.get_keys_values_compressed(tree_id=tree_id)
|
|
312
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
313
313
|
if not deleted_all:
|
|
314
314
|
while node_count > 0:
|
|
315
315
|
node_count -= 1
|
|
@@@ -643,7 -559,7 +644,8 @@@ async def test_inserting_duplicate_key_
|
|
|
643
644
|
side=Side.RIGHT,
|
|
644
645
|
)
|
|
645
646
|
|
|
646
|
-
|
|
647
|
-
hint_keys_values =
|
|
647
|
++
kv_compressed = await data_store.get_keys_values_compressed(tree_id=tree_id)
|
|
648
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
648
649
|
# TODO: more specific exception
|
|
649
650
|
with pytest.raises(Exception):
|
|
650
651
|
await data_store.insert(
|
|
@@@ -749,7 -665,7 +751,8 @@@ async def test_delete_from_left_both_te
|
|
|
749
751
|
|
|
750
752
|
hint_keys_values = None
|
|
751
753
|
if use_hint:
|
|
752
|
-
|
|
753
|
-
hint_keys_values =
|
|
754
|
++
kv_compressed = await data_store.get_keys_values_compressed(tree_id=tree_id)
|
|
755
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
754
756
|
|
|
755
757
|
expected = Program.to(
|
|
756
758
|
(
|
|
@@@ -789,7 -707,7 +792,8 @@@ async def test_delete_from_left_other_n
|
|
|
789
792
|
|
|
790
793
|
hint_keys_values = None
|
|
791
794
|
if use_hint:
|
|
792
|
-
|
|
793
|
-
hint_keys_values =
|
|
795
|
++
kv_compressed = await data_store.get_keys_values_compressed(tree_id=tree_id)
|
|
796
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
794
797
|
|
|
795
798
|
expected = Program.to(
|
|
796
799
|
(
|
|
@@@ -827,7 -749,7 +831,8 @@@ async def test_delete_from_right_both_t
|
|
|
827
831
|
|
|
828
832
|
hint_keys_values = None
|
|
829
833
|
if use_hint:
|
|
830
|
-
|
|
831
|
-
hint_keys_values =
|
|
834
|
++
kv_compressed = await data_store.get_keys_values_compressed(tree_id=tree_id)
|
|
835
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
832
836
|
|
|
833
837
|
expected = Program.to(
|
|
834
838
|
(
|
|
@@@ -867,7 -791,7 +872,8 @@@ async def test_delete_from_right_other_
|
|
|
867
872
|
|
|
868
873
|
hint_keys_values = None
|
|
869
874
|
if use_hint:
|
|
870
|
-
|
|
871
|
-
hint_keys_values =
|
|
875
|
++
kv_compressed = await data_store.get_keys_values_compressed(tree_id=tree_id)
|
|
876
|
++
hint_keys_values = kv_compressed.keys_values_hashed
|
|
872
877
|
|
|
873
878
|
expected = Program.to(
|
|
874
879
|
(
|
|
@@@ -499,341 -493,8 +499,341 @@@ async def test_get_coin_states(db_versi
|
|
|
499
499
|
assert len(await coin_store.get_coin_states_by_ids(True, coins, uint32(0), max_items=10000)) == 600
|
|
500
500
|
|
|
501
501
|
|
|
502
|
+
@dataclass(frozen=True)
|
|
503
|
+
class RandomCoinRecords:
|
|
504
|
+
items: List[CoinRecord]
|
|
505
|
+
puzzle_hashes: List[bytes32]
|
|
506
|
+
hints: List[Tuple[bytes32, bytes]]
|
|
507
|
+
|
|
508
|
+
|
|
509
|
+
@pytest.fixture(scope="session")
|
|
510
|
+
def random_coin_records() -> RandomCoinRecords:
|
|
511
|
+
coin_records: List[CoinRecord] = []
|
|
512
|
+
puzzle_hashes: List[bytes32] = []
|
|
513
|
+
hints: List[Tuple[bytes32, bytes]] = []
|
|
514
|
+
|
|
515
|
+
for i in range(50000):
|
|
516
|
+
is_spent = i % 2 == 0
|
|
517
|
+
is_hinted = i % 7 == 0
|
|
518
|
+
created_height = uint32(i)
|
|
519
|
+
spent_height = uint32(created_height + 100)
|
|
520
|
+
|
|
521
|
+
puzzle_hash = std_hash(i.to_bytes(4, byteorder="big"))
|
|
522
|
+
|
|
523
|
+
coin = Coin(
|
|
524
|
+
std_hash(b"Parent Coin Id " + i.to_bytes(4, byteorder="big")),
|
|
525
|
+
puzzle_hash,
|
|
526
|
+
uint64(1000),
|
|
527
|
+
)
|
|
528
|
+
|
|
529
|
+
if is_hinted:
|
|
530
|
+
hint = std_hash(b"Hinted " + puzzle_hash)
|
|
531
|
+
hints.append((coin.name(), hint))
|
|
532
|
+
puzzle_hashes.append(hint)
|
|
533
|
+
else:
|
|
534
|
+
puzzle_hashes.append(puzzle_hash)
|
|
535
|
+
|
|
536
|
+
coin_records.append(
|
|
537
|
+
CoinRecord(
|
|
538
|
+
coin=coin,
|
|
539
|
+
confirmed_block_index=created_height,
|
|
540
|
+
spent_block_index=spent_height if is_spent else uint32(0),
|
|
541
|
+
coinbase=False,
|
|
542
|
+
timestamp=uint64(0),
|
|
543
|
+
)
|
|
544
|
+
)
|
|
545
|
+
|
|
546
|
+
coin_records.sort(key=lambda cr: max(cr.confirmed_block_index, cr.spent_block_index))
|
|
547
|
+
|
|
548
|
+
return RandomCoinRecords(coin_records, puzzle_hashes, hints)
|
|
549
|
+
|
|
550
|
+
|
|
551
|
+
@pytest.mark.anyio
|
|
552
|
+
@pytest.mark.parametrize("include_spent", [True, False])
|
|
553
|
+
@pytest.mark.parametrize("include_unspent", [True, False])
|
|
554
|
+
@pytest.mark.parametrize("include_hinted", [True, False])
|
|
555
|
+
async def test_coin_state_batches(
|
|
556
|
+
db_version: int,
|
|
557
|
+
random_coin_records: RandomCoinRecords,
|
|
558
|
+
include_spent: bool,
|
|
559
|
+
include_unspent: bool,
|
|
560
|
+
include_hinted: bool,
|
|
561
|
+
) -> None:
|
|
562
|
+
async with DBConnection(db_version) as db_wrapper:
|
|
563
|
+
# Initialize coin and hint stores.
|
|
564
|
+
coin_store = await CoinStore.create(db_wrapper)
|
|
565
|
+
hint_store = await HintStore.create(db_wrapper)
|
|
566
|
+
|
|
567
|
+
await coin_store._add_coin_records(random_coin_records.items)
|
|
568
|
+
await hint_store.add_hints(random_coin_records.hints)
|
|
569
|
+
|
|
570
|
+
# Make sure all of the coin states are found when batching.
|
|
571
|
+
ph_set = set(random_coin_records.puzzle_hashes)
|
|
572
|
+
expected_crs = []
|
|
573
|
+
for cr in random_coin_records.items:
|
|
574
|
+
if cr.spent_block_index == 0 and not include_unspent:
|
|
575
|
+
continue
|
|
576
|
+
if cr.spent_block_index > 0 and not include_spent:
|
|
577
|
+
continue
|
|
578
|
+
if cr.coin.puzzle_hash not in ph_set and not include_hinted:
|
|
579
|
+
continue
|
|
580
|
+
expected_crs.append(cr)
|
|
581
|
+
|
|
582
|
+
height: Optional[uint32] = uint32(0)
|
|
583
|
+
all_coin_states: List[CoinState] = []
|
|
584
|
+
remaining_phs = random_coin_records.puzzle_hashes.copy()
|
|
585
|
+
|
|
586
|
+
def height_of(coin_state: CoinState) -> int:
|
|
587
|
+
return max(coin_state.created_height or 0, coin_state.spent_height or 0)
|
|
588
|
+
|
|
589
|
+
while height is not None:
|
|
590
|
+
(coin_states, height) = await coin_store.batch_coin_states_by_puzzle_hashes(
|
|
591
|
+
remaining_phs[:15000],
|
|
592
|
+
min_height=height,
|
|
593
|
+
include_spent=include_spent,
|
|
594
|
+
include_unspent=include_unspent,
|
|
595
|
+
include_hinted=include_hinted,
|
|
596
|
+
)
|
|
597
|
+
|
|
598
|
+
# Ensure that all of the returned coin states are in order.
|
|
599
|
+
assert all(height_of(coin_states[i]) <= height_of(coin_states[i + 1]) for i in range(len(coin_states) - 1))
|
|
600
|
+
|
|
601
|
+
all_coin_states += coin_states
|
|
602
|
+
|
|
603
|
+
if height is None:
|
|
604
|
+
remaining_phs = remaining_phs[15000:]
|
|
605
|
+
|
|
606
|
+
if len(remaining_phs) > 0:
|
|
607
|
+
height = uint32(0)
|
|
608
|
+
|
|
609
|
+
assert len(all_coin_states) == len(expected_crs)
|
|
610
|
+
|
|
611
|
+
all_coin_states.sort(key=height_of)
|
|
612
|
+
|
|
613
|
+
for i in range(len(expected_crs)):
|
|
614
|
+
actual = all_coin_states[i]
|
|
615
|
+
expected = expected_crs[i]
|
|
616
|
+
|
|
617
|
+
assert actual.coin == expected.coin, i
|
|
618
|
+
assert uint32(actual.created_height or 0) == expected.confirmed_block_index, i
|
|
619
|
+
assert uint32(actual.spent_height or 0) == expected.spent_block_index, i
|
|
620
|
+
|
|
621
|
+
|
|
622
|
+
@pytest.mark.anyio
|
|
623
|
+
@pytest.mark.parametrize("cut_off_middle", [True, False])
|
|
624
|
+
async def test_batch_many_coin_states(db_version: int, cut_off_middle: bool) -> None:
|
|
625
|
+
async with DBConnection(db_version) as db_wrapper:
|
|
626
|
+
ph = bytes32(b"0" * 32)
|
|
627
|
+
|
|
628
|
+
# Generate coin records.
|
|
629
|
+
coin_records: List[CoinRecord] = []
|
|
630
|
+
count = 50000
|
|
631
|
+
|
|
632
|
+
for i in range(count):
|
|
633
|
+
# Create coin records at either height 10 or 12.
|
|
634
|
+
created_height = uint32((i % 2) * 2 + 10)
|
|
635
|
+
coin = Coin(
|
|
636
|
+
std_hash(b"Parent Coin Id " + i.to_bytes(4, byteorder="big")),
|
|
637
|
+
ph,
|
|
638
|
+
uint64(i),
|
|
639
|
+
)
|
|
640
|
+
coin_records.append(
|
|
641
|
+
CoinRecord(
|
|
642
|
+
coin=coin,
|
|
643
|
+
confirmed_block_index=created_height,
|
|
644
|
+
spent_block_index=uint32(0),
|
|
645
|
+
coinbase=False,
|
|
646
|
+
timestamp=uint64(0),
|
|
647
|
+
)
|
|
648
|
+
)
|
|
649
|
+
|
|
650
|
+
# Initialize coin and hint stores.
|
|
651
|
+
coin_store = await CoinStore.create(db_wrapper)
|
|
652
|
+
await HintStore.create(db_wrapper)
|
|
653
|
+
|
|
654
|
+
await coin_store._add_coin_records(coin_records)
|
|
655
|
+
|
|
656
|
+
# Make sure all of the coin states are found.
|
|
657
|
+
(all_coin_states, next_height) = await coin_store.batch_coin_states_by_puzzle_hashes([ph])
|
|
658
|
+
all_coin_states.sort(key=lambda cs: cs.coin.amount)
|
|
659
|
+
|
|
660
|
+
assert next_height is None
|
|
661
|
+
assert len(all_coin_states) == len(coin_records)
|
|
662
|
+
|
|
663
|
+
for i in range(min(len(coin_records), len(all_coin_states))):
|
|
664
|
+
assert coin_records[i].coin.name().hex() == all_coin_states[i].coin.name().hex(), i
|
|
665
|
+
|
|
666
|
+
# For the middle case, insert a coin record between the two heights 10 and 12.
|
|
667
|
+
await coin_store._add_coin_records(
|
|
668
|
+
[
|
|
669
|
+
CoinRecord(
|
|
502
|
-
coin=Coin(std_hash(b"extra coin"), ph, 0),
|
|
670
|
++
coin=Coin(std_hash(b"extra coin"), ph, uint64(0)),
|
|
671
|
+
# Insert a coin record in the middle between heights 10 and 12.
|
|
672
|
+
# Or after all of the other coins if testing the batch limit.
|
|
673
|
+
confirmed_block_index=uint32(11 if cut_off_middle else 50),
|
|
674
|
+
spent_block_index=uint32(0),
|
|
675
|
+
coinbase=False,
|
|
676
|
+
timestamp=uint64(0),
|
|
677
|
+
)
|
|
678
|
+
]
|
|
679
|
+
)
|
|
680
|
+
|
|
681
|
+
(all_coin_states, next_height) = await coin_store.batch_coin_states_by_puzzle_hashes([ph])
|
|
682
|
+
|
|
683
|
+
# Make sure that the extra coin records are not included in the results.
|
|
684
|
+
assert next_height == (12 if cut_off_middle else 50)
|
|
685
|
+
assert len(all_coin_states) == (25001 if cut_off_middle else 50000)
|
|
686
|
+
|
|
687
|
+
|
|
503
688
|
@pytest.mark.anyio
|
|
504
689
|
async def test_unsupported_version() -> None:
|
|
505
690
|
with pytest.raises(RuntimeError, match="CoinStore does not support database schema v1"):
|
|
506
691
|
async with DBConnection(1) as db_wrapper:
|
|
507
692
|
await CoinStore.create(db_wrapper)
|
|
693
|
+
|
|
694
|
+
|
|
695
|
+
TEST_COIN_ID = b"c" * 32
|
|
696
|
+
TEST_PUZZLEHASH = b"p" * 32
|
|
508
|
-
TEST_AMOUNT = 1337
|
|
697
|
++
TEST_AMOUNT = uint64(1337)
|
|
698
|
+
TEST_PARENT_ID = Coin(b"a" * 32, TEST_PUZZLEHASH, TEST_AMOUNT).name()
|
|
509
|
-
TEST_PARENT_DIFFERENT_AMOUNT = 5
|
|
699
|
++
TEST_PARENT_DIFFERENT_AMOUNT = uint64(5)
|
|
700
|
+
TEST_PARENT_ID_DIFFERENT_AMOUNT = Coin(b"a" * 32, TEST_PUZZLEHASH, TEST_PARENT_DIFFERENT_AMOUNT).name()
|
|
701
|
+
TEST_PARENT_PARENT_ID = b"f" * 32
|
|
702
|
+
|
|
703
|
+
|
|
704
|
+
@dataclass(frozen=True)
|
|
705
|
+
class UnspentLineageInfoTestItem:
|
|
706
|
+
coin_id: bytes
|
|
707
|
+
puzzlehash: bytes
|
|
708
|
+
amount: int
|
|
709
|
+
parent_id: bytes
|
|
710
|
+
is_spent: bool = False
|
|
711
|
+
|
|
712
|
+
|
|
713
|
+
@dataclass
|
|
714
|
+
class UnspentLineageInfoCase:
|
|
715
|
+
id: str
|
|
716
|
+
items: List[UnspentLineageInfoTestItem]
|
|
717
|
+
expected_success: bool
|
|
718
|
+
parent_with_diff_amount: bool = False
|
|
719
|
+
marks: Marks = ()
|
|
720
|
+
|
|
721
|
+
|
|
722
|
+
@pytest.mark.anyio
|
|
723
|
+
@datacases(
|
|
724
|
+
UnspentLineageInfoCase(
|
|
725
|
+
id="Unspent with parent that has same amount but different puzzlehash",
|
|
726
|
+
items=[
|
|
727
|
+
UnspentLineageInfoTestItem(TEST_COIN_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_ID),
|
|
728
|
+
UnspentLineageInfoTestItem(b"2" * 32, b"2" * 32, 2, b"1" * 32),
|
|
729
|
+
UnspentLineageInfoTestItem(b"3" * 32, b"3" * 32, 3, b"2" * 32),
|
|
730
|
+
UnspentLineageInfoTestItem(TEST_PARENT_ID, b"4" * 32, TEST_AMOUNT, TEST_PARENT_PARENT_ID, is_spent=True),
|
|
731
|
+
],
|
|
732
|
+
expected_success=False,
|
|
733
|
+
),
|
|
734
|
+
UnspentLineageInfoCase(
|
|
735
|
+
id="Unspent with parent that has same puzzlehash but different amount",
|
|
736
|
+
items=[
|
|
737
|
+
UnspentLineageInfoTestItem(TEST_COIN_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_ID_DIFFERENT_AMOUNT),
|
|
738
|
+
UnspentLineageInfoTestItem(b"2" * 32, b"2" * 32, 2, b"1" * 32),
|
|
739
|
+
UnspentLineageInfoTestItem(b"3" * 32, b"3" * 32, 3, b"2" * 32),
|
|
740
|
+
UnspentLineageInfoTestItem(
|
|
741
|
+
TEST_PARENT_ID_DIFFERENT_AMOUNT,
|
|
742
|
+
TEST_PUZZLEHASH,
|
|
743
|
+
TEST_PARENT_DIFFERENT_AMOUNT,
|
|
744
|
+
TEST_PARENT_PARENT_ID,
|
|
745
|
+
is_spent=True,
|
|
746
|
+
),
|
|
747
|
+
],
|
|
748
|
+
parent_with_diff_amount=True,
|
|
749
|
+
expected_success=True,
|
|
750
|
+
),
|
|
751
|
+
UnspentLineageInfoCase(
|
|
752
|
+
id="Unspent with parent that has same puzzlehash and amount but is also unspent",
|
|
753
|
+
items=[
|
|
754
|
+
UnspentLineageInfoTestItem(TEST_COIN_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_ID),
|
|
755
|
+
UnspentLineageInfoTestItem(b"2" * 32, b"2" * 32, 2, b"1" * 32),
|
|
756
|
+
UnspentLineageInfoTestItem(b"3" * 32, b"3" * 32, 3, b"2" * 32),
|
|
757
|
+
UnspentLineageInfoTestItem(TEST_PARENT_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_PARENT_ID),
|
|
758
|
+
],
|
|
759
|
+
expected_success=False,
|
|
760
|
+
),
|
|
761
|
+
UnspentLineageInfoCase(
|
|
762
|
+
id="More than one unspent with parent that has same puzzlehash and amount",
|
|
763
|
+
items=[
|
|
764
|
+
UnspentLineageInfoTestItem(TEST_COIN_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_ID),
|
|
765
|
+
UnspentLineageInfoTestItem(b"2" * 32, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_ID),
|
|
766
|
+
UnspentLineageInfoTestItem(b"3" * 32, b"3" * 32, 3, b"2" * 32),
|
|
767
|
+
UnspentLineageInfoTestItem(
|
|
768
|
+
TEST_PARENT_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_PARENT_ID, is_spent=True
|
|
769
|
+
),
|
|
770
|
+
],
|
|
771
|
+
expected_success=False,
|
|
772
|
+
),
|
|
773
|
+
UnspentLineageInfoCase(
|
|
774
|
+
id="Unspent with parent that has same puzzlehash and amount",
|
|
775
|
+
items=[
|
|
776
|
+
UnspentLineageInfoTestItem(TEST_COIN_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_ID),
|
|
777
|
+
UnspentLineageInfoTestItem(b"2" * 32, b"2" * 32, 2, b"1" * 32),
|
|
778
|
+
UnspentLineageInfoTestItem(b"3" * 32, b"3" * 32, 3, b"2" * 32),
|
|
779
|
+
UnspentLineageInfoTestItem(
|
|
780
|
+
TEST_PARENT_ID, TEST_PUZZLEHASH, TEST_AMOUNT, TEST_PARENT_PARENT_ID, is_spent=True
|
|
781
|
+
),
|
|
782
|
+
],
|
|
783
|
+
expected_success=True,
|
|
784
|
+
),
|
|
785
|
+
)
|
|
786
|
+
async def test_get_unspent_lineage_info_for_puzzle_hash(case: UnspentLineageInfoCase) -> None:
|
|
787
|
+
CoinRecordRawData = Tuple[
|
|
788
|
+
bytes, # coin_name (blob)
|
|
789
|
+
int, # confirmed_index (bigint)
|
|
790
|
+
int, # spent_index (bigint)
|
|
791
|
+
int, # coinbase (int)
|
|
792
|
+
bytes, # puzzle_hash (blob)
|
|
793
|
+
bytes, # coin_parent (blob)
|
|
794
|
+
bytes, # amount (blob)
|
|
795
|
+
int, # timestamp (bigint)
|
|
796
|
+
]
|
|
797
|
+
|
|
798
|
+
def make_test_data(test_items: List[UnspentLineageInfoTestItem]) -> List[CoinRecordRawData]:
|
|
799
|
+
test_data = []
|
|
800
|
+
for item in test_items:
|
|
801
|
+
test_data.append(
|
|
802
|
+
(
|
|
803
|
+
item.coin_id,
|
|
804
|
+
0,
|
|
805
|
+
1 if item.is_spent else 0,
|
|
806
|
+
0,
|
|
807
|
+
item.puzzlehash,
|
|
808
|
+
item.parent_id,
|
|
809
|
+
int_to_bytes(item.amount),
|
|
810
|
+
0,
|
|
811
|
+
)
|
|
812
|
+
)
|
|
813
|
+
return test_data
|
|
814
|
+
|
|
815
|
+
async with DBConnection(2) as db_wrapper:
|
|
816
|
+
# Prepare the coin store with the test case's data
|
|
817
|
+
coin_store = await CoinStore.create(db_wrapper)
|
|
818
|
+
async with db_wrapper.writer() as writer:
|
|
819
|
+
for item in make_test_data(case.items):
|
|
820
|
+
await writer.execute(
|
|
821
|
+
"INSERT INTO coin_record "
|
|
822
|
+
"(coin_name, confirmed_index, spent_index, coinbase, puzzle_hash, coin_parent, amount, timestamp) "
|
|
823
|
+
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
|
|
824
|
+
item,
|
|
825
|
+
)
|
|
826
|
+
# Run the test case
|
|
827
|
+
result = await coin_store.get_unspent_lineage_info_for_puzzle_hash(bytes32(TEST_PUZZLEHASH))
|
|
828
|
+
if case.expected_success:
|
|
829
|
+
assert result == UnspentLineageInfo(
|
|
830
|
+
coin_id=bytes32(TEST_COIN_ID),
|
|
831
|
+
coin_amount=TEST_AMOUNT,
|
|
832
|
+
parent_id=bytes32(TEST_PARENT_ID_DIFFERENT_AMOUNT)
|
|
833
|
+
if case.parent_with_diff_amount
|
|
834
|
+
else bytes32(TEST_PARENT_ID),
|
|
835
|
+
parent_amount=TEST_PARENT_DIFFERENT_AMOUNT if case.parent_with_diff_amount else TEST_AMOUNT,
|
|
836
|
+
parent_parent_id=bytes32(TEST_PARENT_PARENT_ID),
|
|
837
|
+
)
|
|
838
|
+
else:
|
|
839
|
+
assert result is None
|
|
@@@ -137,7 -145,7 +137,7 @@@ class TestConditions
|
|
|
137
137
|
async def test_unknown_conditions_with_cost(
|
|
138
138
|
self, opcode: int, expected_cost: int, bt: BlockTools, consensus_mode: ConsensusMode
|
|
139
139
|
) -> None:
|
|
140
|
--
conditions = Program.to(assemble(f"(({opcode} 1337))"))
|
|
140
|
++
conditions = Program.to(assemble(f"(({opcode} 1337))"))
|
|
141
141
|
additions, removals, new_block = await check_conditions(bt, conditions)
|
|
142
142
|
|
|
143
143
|
if consensus_mode != ConsensusMode.HARD_FORK_2_0:
|
|
@@@ -164,7 -172,7 +164,7 @@@
|
|
|
164
164
|
async def test_softfork_condition(
|
|
165
165
|
self, condition: str, expected_cost: int, bt: BlockTools, consensus_mode: ConsensusMode
|
|
166
166
|
) -> None:
|
|
167
|
--
conditions = Program.to(assemble(condition))
|
|
167
|
++
conditions = Program.to(assemble(condition))
|
|
168
168
|
additions, removals, new_block = await check_conditions(bt, conditions)
|
|
169
169
|
|
|
170
170
|
if consensus_mode != ConsensusMode.HARD_FORK_2_0:
|
|
@@@ -267,56 -275,56 +267,48 @@@
|
|
|
267
267
|
],
|
|
268
268
|
)
|
|
269
269
|
async def test_condition(self, opcode: ConditionOpcode, value: int, expected: Err, bt: BlockTools) -> None:
|
|
270
|
--
conditions = Program.to(assemble(f"(({opcode[0]} {value}))"))
|
|
270
|
++
conditions = Program.to(assemble(f"(({opcode[0]} {value}))"))
|
|
271
271
|
await check_conditions(bt, conditions, expected_err=expected)
|
|
272
272
|
|
|
273
273
|
@pytest.mark.anyio
|
|
274
274
|
async def test_invalid_my_id(self, bt: BlockTools) -> None:
|
|
275
275
|
blocks = await initial_blocks(bt)
|
|
276
|
-
coin =
|
|
276
|
+
coin = blocks[-2].get_included_reward_coins()[0]
|
|
277
277
|
wrong_name = bytearray(coin.name())
|
|
278
278
|
wrong_name[-1] ^= 1
|
|
279
|
--
conditions = Program.to(
|
|
280
|
--
assemble(
|
|
281
|
--
f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{wrong_name.hex()}))"
|
|
282
|
--
) # type: ignore[no-untyped-call]
|
|
283
|
--
)
|
|
279
|
++
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{wrong_name.hex()}))"))
|
|
284
280
|
await check_conditions(bt, conditions, expected_err=Err.ASSERT_MY_COIN_ID_FAILED)
|
|
285
281
|
|
|
286
282
|
@pytest.mark.anyio
|
|
287
283
|
async def test_valid_my_id(self, bt: BlockTools) -> None:
|
|
288
284
|
blocks = await initial_blocks(bt)
|
|
289
|
-
coin =
|
|
290
|
-
conditions = Program.to(
|
|
291
|
-
assemble(
|
|
292
|
-
f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{coin.name().hex()}))"
|
|
293
|
-
) # type: ignore[no-untyped-call]
|
|
294
|
-
)
|
|
285
|
+
coin = blocks[-2].get_included_reward_coins()[0]
|
|
295
|
-
conditions = Program.to(
|
|
296
|
-
assemble(
|
|
297
|
-
f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{coin.name().hex()}))"
|
|
298
|
-
) # type: ignore[no-untyped-call]
|
|
299
|
-
)
|
|
286
|
++
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{coin.name().hex()}))"))
|
|
300
287
|
await check_conditions(bt, conditions)
|
|
301
288
|
|
|
302
289
|
@pytest.mark.anyio
|
|
303
290
|
async def test_invalid_coin_announcement(self, bt: BlockTools) -> None:
|
|
304
291
|
blocks = await initial_blocks(bt)
|
|
305
|
-
coin =
|
|
292
|
+
coin = blocks[-2].get_included_reward_coins()[0]
|
|
306
293
|
announce = AssertCoinAnnouncement(asserted_id=coin.name(), asserted_msg=b"test_bad")
|
|
307
294
|
conditions = Program.to(
|
|
308
|
-
assemble(
|
|
295
|
+
assemble(
|
|
309
296
|
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
|
|
310
297
|
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.msg_calc.hex()}))"
|
|
311
|
-
)
|
|
298
|
+
)
|
|
312
299
|
)
|
|
313
300
|
await check_conditions(bt, conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
|
|
314
301
|
|
|
315
302
|
@pytest.mark.anyio
|
|
316
303
|
async def test_valid_coin_announcement(self, bt: BlockTools) -> None:
|
|
317
304
|
blocks = await initial_blocks(bt)
|
|
318
|
-
coin =
|
|
305
|
+
coin = blocks[-2].get_included_reward_coins()[0]
|
|
319
306
|
announce = AssertCoinAnnouncement(asserted_id=coin.name(), asserted_msg=b"test")
|
|
320
307
|
conditions = Program.to(
|
|
321
|
-
assemble(
|
|
308
|
+
assemble(
|
|
322
309
|
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
|
|
323
310
|
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.msg_calc.hex()}))"
|
|
324
|
-
)
|
|
311
|
+
)
|
|
325
312
|
)
|
|
326
313
|
await check_conditions(bt, conditions)
|
|
327
314
|
|
|
@@@ -324,10 -332,10 +316,10 @@@
|
|
|
324
316
|
async def test_invalid_puzzle_announcement(self, bt: BlockTools) -> None:
|
|
325
317
|
announce = AssertPuzzleAnnouncement(asserted_ph=EASY_PUZZLE_HASH, asserted_msg=b"test_bad")
|
|
326
318
|
conditions = Program.to(
|
|
327
|
-
assemble(
|
|
319
|
+
assemble(
|
|
328
320
|
f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
|
|
329
321
|
f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.msg_calc.hex()}))"
|
|
330
|
-
)
|
|
322
|
+
)
|
|
331
323
|
)
|
|
332
324
|
await check_conditions(bt, conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
|
|
333
325
|
|
|
@@@ -335,10 -343,10 +327,10 @@@
|
|
|
335
327
|
async def test_valid_puzzle_announcement(self, bt: BlockTools) -> None:
|
|
336
328
|
announce = AssertPuzzleAnnouncement(asserted_ph=EASY_PUZZLE_HASH, asserted_msg=b"test")
|
|
337
329
|
conditions = Program.to(
|
|
338
|
-
assemble(
|
|
330
|
+
assemble(
|
|
339
331
|
f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
|
|
340
332
|
f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.msg_calc.hex()}))"
|
|
341
|
-
)
|
|
333
|
+
)
|
|
342
334
|
)
|
|
343
335
|
await check_conditions(bt, conditions)
|
|
344
336
|
|
|
@@@ -387,7 -400,7 +379,7 @@@
|
|
|
387
379
|
|
|
388
380
|
conditions = b""
|
|
389
381
|
if prefix != "":
|
|
390
|
--
conditions += b"\xff" + assemble(prefix).as_bin()
|
|
382
|
++
conditions += b"\xff" + assemble(prefix).as_bin()
|
|
391
383
|
|
|
392
384
|
cond = condition.format(
|
|
393
385
|
coin="0x" + coin.name().hex(),
|
|
@@@ -396,7 -409,7 +388,7 @@@
|
|
|
396
388
|
pann="0x" + puzzle_announcement.msg_calc.hex(),
|
|
397
389
|
)
|
|
398
390
|
|
|
399
|
--
conditions += (b"\xff" + assemble(cond).as_bin()) * num
|
|
391
|
++
conditions += (b"\xff" + assemble(cond).as_bin()) * num
|
|
400
392
|
conditions += b"\x80"
|
|
401
393
|
conditions_program = Program.from_bytes(conditions)
|
|
402
394
|
|
|
@@@ -17,15 -19,80 +17,15 @@@ from tests.blockchain.blockchain_test_u
|
|
|
17
17
|
coin_ids = [std_hash(i.to_bytes(4, "big")) for i in range(10)]
|
|
18
18
|
parent_ids = [std_hash(i.to_bytes(4, "big")) for i in range(10)]
|
|
19
19
|
phs = [std_hash(i.to_bytes(4, "big")) for i in range(10)]
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
coin_ids[0],
|
|
23
|
-
parent_ids[0],
|
|
24
|
-
phs[0],
|
|
25
|
-
123,
|
|
26
|
-
None,
|
|
27
|
-
uint64(5),
|
|
28
|
-
None,
|
|
29
|
-
None,
|
|
30
|
-
None,
|
|
31
|
-
None,
|
|
32
|
-
[
|
|
33
|
-
(phs[2], uint64(123), b""),
|
|
34
|
-
(phs[4], uint64(3), b"1" * 32),
|
|
35
|
-
],
|
|
36
|
-
[],
|
|
37
|
-
[],
|
|
38
|
-
[],
|
|
39
|
-
[],
|
|
40
|
-
[],
|
|
41
|
-
[],
|
|
42
|
-
[],
|
|
43
|
-
0,
|
|
44
|
-
),
|
|
45
|
-
Spend(
|
|
46
|
-
coin_ids[2],
|
|
47
|
-
parent_ids[2],
|
|
48
|
-
phs[0],
|
|
49
|
-
123,
|
|
50
|
-
None,
|
|
51
|
-
uint64(6),
|
|
52
|
-
None,
|
|
53
|
-
None,
|
|
54
|
-
None,
|
|
55
|
-
None,
|
|
56
|
-
[
|
|
57
|
-
(phs[7], uint64(123), b""),
|
|
58
|
-
(phs[4], uint64(6), b""),
|
|
59
|
-
(phs[9], uint64(123), b"1" * 32),
|
|
60
|
-
],
|
|
61
|
-
[],
|
|
62
|
-
[],
|
|
63
|
-
[],
|
|
64
|
-
[],
|
|
65
|
-
[],
|
|
66
|
-
[],
|
|
67
|
-
[],
|
|
68
|
-
0,
|
|
69
|
-
),
|
|
70
|
-
Spend(
|
|
71
|
-
coin_ids[1],
|
|
72
|
-
parent_ids[1],
|
|
73
|
-
phs[7],
|
|
74
|
-
123,
|
|
75
|
-
None,
|
|
76
|
-
uint64(2),
|
|
77
|
-
None,
|
|
78
|
-
None,
|
|
79
|
-
None,
|
|
80
|
-
None,
|
|
81
|
-
[
|
|
82
|
-
(phs[5], uint64(123), b""),
|
|
83
|
-
(phs[6], uint64(5), b"1" * 3),
|
|
84
|
-
],
|
|
85
|
-
[],
|
|
86
|
-
[],
|
|
87
|
-
[],
|
|
88
|
-
[],
|
|
89
|
-
[],
|
|
90
|
-
[],
|
|
91
|
-
[],
|
|
92
|
-
0,
|
|
93
|
-
),
|
|
20
|
+
removals = [(coin_ids[0], phs[0]), (coin_ids[2], phs[0]), (coin_ids[1], phs[7])]
|
|
21
|
+
additions = [
|
|
94
|
-
(Coin(coin_ids[0], phs[2], 123), None),
|
|
95
|
-
(Coin(coin_ids[0], phs[4], 3), b"1" * 32),
|
|
96
|
-
(Coin(coin_ids[2], phs[7], 123), None),
|
|
97
|
-
(Coin(coin_ids[2], phs[4], 6), None),
|
|
98
|
-
(Coin(coin_ids[2], phs[9], 123), b"1" * 32),
|
|
99
|
-
(Coin(coin_ids[1], phs[5], 123), None),
|
|
100
|
-
(Coin(coin_ids[1], phs[6], 5), b"1" * 3),
|
|
22
|
++
(Coin(coin_ids[0], phs[2], uint64(123)), None),
|
|
23
|
++
(Coin(coin_ids[0], phs[4], uint64(3)), b"1" * 32),
|
|
24
|
++
(Coin(coin_ids[2], phs[7], uint64(123)), None),
|
|
25
|
++
(Coin(coin_ids[2], phs[4], uint64(6)), None),
|
|
26
|
++
(Coin(coin_ids[2], phs[9], uint64(123)), b"1" * 32),
|
|
27
|
++
(Coin(coin_ids[1], phs[5], uint64(123)), None),
|
|
28
|
++
(Coin(coin_ids[1], phs[6], uint64(5)), b"1" * 3),
|
|
101
29
|
]
|
|
102
30
|
|
|
103
31
|
|
|
@@@ -2748,7 -2740,7 +2748,7 @@@ def rand_hash() -> bytes32
|
|
|
2748
2748
|
|
|
2749
2749
|
def item_cost(cost: int, fee_rate: float) -> MempoolItem:
|
|
2750
2750
|
fee = cost * fee_rate
|
|
2751
|
--
amount =
|
|
2751
|
++
amount = uint64(fee + 100)
|
|
2752
2752
|
coin = Coin(rand_hash(), rand_hash(), amount)
|
|
2753
2753
|
return mk_item([coin], cost=cost, fee=int(cost * fee_rate))
|
|
2754
2754
|
|
|
@@@ -2837,7 -2824,7 +2837,7 @@@ def test_limit_expiring_transactions(he
|
|
|
2837
2837
|
fee_rate = 2.7
|
|
2838
2838
|
for cost in items:
|
|
2839
2839
|
fee = cost * fee_rate
|
|
2840
|
--
amount =
|
|
2840
|
++
amount = uint64(fee + 100)
|
|
2841
2841
|
coin = Coin(rand_hash(), rand_hash(), amount)
|
|
2842
2842
|
if height:
|
|
2843
2843
|
ret = mempool.add_to_pool(mk_item([coin], cost=cost, fee=int(cost * fee_rate), assert_before_height=15))
|
|
@@@ -133,17 -117,14 +133,22 @@@ async def instantiate_mempool_manager
|
|
|
133
133
|
block_height: uint32 = TEST_HEIGHT,
|
|
134
134
|
block_timestamp: uint64 = TEST_TIMESTAMP,
|
|
135
135
|
constants: ConsensusConstants = DEFAULT_CONSTANTS,
|
|
136
|
++
max_tx_clvm_cost: Optional[uint64] = None,
|
|
136
137
|
) -> MempoolManager:
|
|
137
|
-
mempool_manager = MempoolManager(get_coin_records, constants)
|
|
138
|
-
mempool_manager = MempoolManager(get_coin_record, constants)
|
|
138
|
++
mempool_manager = MempoolManager(get_coin_records, constants, max_tx_clvm_cost=max_tx_clvm_cost)
|
|
139
139
|
test_block_record = create_test_block_record(height=block_height, timestamp=block_timestamp)
|
|
140
140
|
await mempool_manager.new_peak(test_block_record, None)
|
|
141
|
+
invariant_check_mempool(mempool_manager.mempool)
|
|
141
142
|
return mempool_manager
|
|
142
143
|
|
|
143
144
|
|
|
144
|
-
async def setup_mempool_with_coins(
|
|
145
|
+
async def setup_mempool_with_coins(
|
|
145
|
-
*,
|
|
146
|
++
*,
|
|
147
|
++
coin_amounts: List[int],
|
|
148
|
++
max_block_clvm_cost: Optional[int] = None,
|
|
149
|
++
max_tx_clvm_cost: Optional[uint64] = None,
|
|
150
|
++
mempool_block_buffer: Optional[int] = None,
|
|
151
|
+
) -> Tuple[MempoolManager, List[Coin]]:
|
|
146
152
|
coins = []
|
|
147
153
|
test_coin_records = {}
|
|
148
154
|
for amount in coin_amounts:
|
|
@@@ -151,19 -132,10 +156,22 @@@
|
|
|
151
156
|
coins.append(coin)
|
|
152
157
|
test_coin_records[coin.name()] = CoinRecord(coin, uint32(0), uint32(0), False, uint64(0))
|
|
153
158
|
|
|
154
|
-
async def
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
159
|
+
async def get_coin_records(coin_ids: Collection[bytes32]) -> List[CoinRecord]:
|
|
160
|
+
ret: List[CoinRecord] = []
|
|
161
|
+
for name in coin_ids:
|
|
162
|
+
r = test_coin_records.get(name)
|
|
163
|
+
if r is not None:
|
|
164
|
+
ret.append(r)
|
|
165
|
+
return ret
|
|
166
|
+
|
|
167
|
++
constants = DEFAULT_CONSTANTS
|
|
168
|
+
if max_block_clvm_cost is not None:
|
|
158
|
-
constants = dataclasses.replace(
|
|
159
|
-
|
|
160
|
-
constants =
|
|
161
|
-
mempool_manager = await instantiate_mempool_manager(
|
|
169
|
++
constants = dataclasses.replace(constants, MAX_BLOCK_COST_CLVM=max_block_clvm_cost)
|
|
170
|
++
if mempool_block_buffer is not None:
|
|
171
|
++
constants = dataclasses.replace(constants, MEMPOOL_BLOCK_BUFFER=mempool_block_buffer)
|
|
172
|
++
mempool_manager = await instantiate_mempool_manager(
|
|
173
|
++
get_coin_records, constants=constants, max_tx_clvm_cost=max_tx_clvm_cost
|
|
174
|
++
)
|
|
162
175
|
return (mempool_manager, coins)
|
|
163
176
|
|
|
164
177
|
|
|
@@@ -404,13 -359,9 +412,13 @@@ def make_bundle_spends_map_and_fee
|
|
|
404
412
|
coin_id = bytes32(spend.coin_id)
|
|
405
413
|
spend_additions = []
|
|
406
414
|
for puzzle_hash, amount, _ in spend.create_coin:
|
|
407
|
--
spend_additions.append(Coin(coin_id, puzzle_hash, amount))
|
|
415
|
++
spend_additions.append(Coin(coin_id, puzzle_hash, uint64(amount)))
|
|
408
416
|
additions_amount += amount
|
|
409
|
-
eligibility_and_additions[coin_id] = (
|
|
417
|
+
eligibility_and_additions[coin_id] = EligibilityAndAdditions(
|
|
418
|
+
is_eligible_for_dedup=bool(spend.flags & ELIGIBLE_FOR_DEDUP),
|
|
419
|
+
spend_additions=spend_additions,
|
|
420
|
+
is_eligible_for_ff=bool(spend.flags & ELIGIBLE_FOR_FF),
|
|
421
|
+
)
|
|
410
422
|
for coin_spend in spend_bundle.coin_spends:
|
|
411
423
|
coin_id = coin_spend.coin.name()
|
|
412
424
|
removals_amount += coin_spend.coin.amount
|
|
@@@ -681,7 -625,7 +689,7 @@@ async def test_ephemeral_timelock
|
|
|
681
689
|
)
|
|
682
690
|
|
|
683
691
|
conditions = [[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, 1]]
|
|
684
|
--
created_coin = Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, 1)
|
|
692
|
++
created_coin = Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, uint64(1))
|
|
685
693
|
sb1 = spend_bundle_from_conditions(conditions)
|
|
686
694
|
sb2 = spend_bundle_from_conditions([[opcode, lock_value]], created_coin)
|
|
687
695
|
# sb spends TEST_COIN and creates created_coin which gets spent too
|
|
@@@ -739,7 -683,7 +747,7 @@@ def mk_item
|
|
|
739
747
|
def make_test_coins() -> List[Coin]:
|
|
740
748
|
ret: List[Coin] = []
|
|
741
749
|
for i in range(5):
|
|
742
|
--
ret.append(Coin(height_hash(i), height_hash(i + 100), i * 100))
|
|
750
|
++
ret.append(Coin(height_hash(i), height_hash(i + 100), uint64(i * 100)))
|
|
743
751
|
return ret
|
|
744
752
|
|
|
745
753
|
|
|
@@@ -992,8 -928,8 +1000,8 @@@ async def test_create_bundle_from_mempo
|
|
|
992
1000
|
assert result[1] == MempoolInclusionStatus.SUCCESS
|
|
993
1001
|
|
|
994
1002
|
mempool_manager, coins = await setup_mempool_with_coins(coin_amounts=list(range(2000000000, 2000002200)))
|
|
995
|
--
high_rate_spends = await make_coin_spends(coins[0:
|
|
996
|
--
low_rate_spends = await make_coin_spends(coins[
|
|
1003
|
++
high_rate_spends = await make_coin_spends(coins[0:2200])
|
|
1004
|
++
low_rate_spends = await make_coin_spends(coins[2200:2400], high_fees=False)
|
|
997
1005
|
spends = low_rate_spends + high_rate_spends if reverse_tx_order else high_rate_spends + low_rate_spends
|
|
998
1006
|
await send_spends_to_mempool(spends)
|
|
999
1007
|
assert mempool_manager.peak is not None
|
|
@@@ -1022,71 -947,30 +1030,74 @@@ async def test_create_bundle_from_mempo
|
|
|
1022
1030
|
async def make_and_send_big_cost_sb(coin: Coin) -> None:
|
|
1023
1031
|
conditions = []
|
|
1024
1032
|
g1 = G1Element()
|
|
1025
|
-
for _ in range(
|
|
1026
|
-
for _ in range(2436):
|
|
1033
|
++
for _ in range(144):
|
|
1027
1034
|
conditions.append([ConditionOpcode.AGG_SIG_UNSAFE, g1, IDENTITY_PUZZLE_HASH])
|
|
1028
|
-
conditions.append([ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, coin.amount -
|
|
1035
|
+
conditions.append([ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, coin.amount - 10_000_000])
|
|
1029
1036
|
# Create a spend bundle with a big enough cost that gets it close to the limit
|
|
1030
1037
|
_, _, res = await generate_and_add_spendbundle(mempool_manager, conditions, coin)
|
|
1031
1038
|
assert res[1] == MempoolInclusionStatus.SUCCESS
|
|
1032
1039
|
|
|
1033
|
-
mempool_manager, coins = await setup_mempool_with_coins(
|
|
1034
|
-
# Create a spend bundle with a big enough cost that gets it close to the limit
|
|
1035
|
-
await make_and_send_big_cost_sb(coins[0])
|
|
1036
|
-
# Create a second spend bundle with a relatively smaller cost.
|
|
1037
|
-
# Combined with the first spend bundle, we'd exceed the maximum block clvm cost
|
|
1038
|
-
conditions = [[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, coins[1].amount - 2]]
|
|
1039
|
-
sb2, _, res = await generate_and_add_spendbundle(mempool_manager, conditions, coins[1])
|
|
1040
|
+
mempool_manager, coins = await setup_mempool_with_coins(
|
|
1040
|
-
coin_amounts=list(range(1_000_000_000, 1_000_000_030)),
|
|
1041
|
++
coin_amounts=list(range(1_000_000_000, 1_000_000_030)),
|
|
1042
|
++
max_block_clvm_cost=550_000_000,
|
|
1043
|
++
max_tx_clvm_cost=uint64(550_000_000 * 0.6),
|
|
1044
|
++
mempool_block_buffer=20,
|
|
1045
|
+
)
|
|
1046
|
+
# Create the spend bundles with a big enough cost that they get close to the limit
|
|
1047
|
+
for i in range(num_skipped_items):
|
|
1048
|
+
await make_and_send_big_cost_sb(coins[i])
|
|
1049
|
+
|
|
1050
|
+
# Create a spend bundle with a relatively smaller cost.
|
|
1051
|
+
# Combined with a big cost spend bundle, we'd exceed the maximum block clvm cost
|
|
1052
|
+
sb2_coin = coins[num_skipped_items]
|
|
1053
|
+
conditions = [[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, sb2_coin.amount - 200_000]]
|
|
1054
|
+
sb2, _, res = await generate_and_add_spendbundle(mempool_manager, conditions, sb2_coin)
|
|
1041
1055
|
assert res[1] == MempoolInclusionStatus.SUCCESS
|
|
1042
|
-
sb2_addition = Coin(sb2_coin.name(), IDENTITY_PUZZLE_HASH, sb2_coin.amount - 200_000)
|
|
1056
|
++
sb2_addition = Coin(sb2_coin.name(), IDENTITY_PUZZLE_HASH, uint64(sb2_coin.amount - 200_000))
|
|
1057
|
+
# Create 4 extra spend bundles with smaller FPC and smaller costs
|
|
1058
|
+
extra_sbs = []
|
|
1059
|
+
extra_additions = []
|
|
1060
|
+
for i in range(num_skipped_items + 1, num_skipped_items + 5):
|
|
1061
|
+
conditions = [[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, coins[i].amount - 30_000]]
|
|
1062
|
+
# Make the first of these without eligible coins
|
|
1063
|
+
if i == num_skipped_items + 1:
|
|
1064
|
+
conditions.append([ConditionOpcode.AGG_SIG_UNSAFE, G1Element(), IDENTITY_PUZZLE_HASH])
|
|
1065
|
+
sb, _, res = await generate_and_add_spendbundle(mempool_manager, conditions, coins[i])
|
|
1066
|
+
extra_sbs.append(sb)
|
|
1043
|
-
coin = Coin(coins[i].name(), IDENTITY_PUZZLE_HASH, coins[i].amount - 30_000)
|
|
1067
|
++
coin = Coin(coins[i].name(), IDENTITY_PUZZLE_HASH, uint64(coins[i].amount - 30_000))
|
|
1068
|
+
extra_additions.append(coin)
|
|
1069
|
+
assert res[1] == MempoolInclusionStatus.SUCCESS
|
|
1070
|
+
|
|
1044
1071
|
assert mempool_manager.peak is not None
|
|
1045
|
-
|
|
1072
|
+
caplog.set_level(logging.DEBUG)
|
|
1073
|
+
result = await mempool_manager.create_bundle_from_mempool(
|
|
1074
|
+
mempool_manager.peak.header_hash, get_unspent_lineage_info_for_puzzle_hash
|
|
1075
|
+
)
|
|
1046
1076
|
assert result is not None
|
|
1047
1077
|
agg, additions = result
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1078
|
+
skipped_due_to_eligible_coins = sum(
|
|
1079
|
+
1
|
|
1080
|
+
for line in caplog.text.split("\n")
|
|
1081
|
+
if "DEBUG Exception while checking a mempool item for deduplication: Skipping transaction with eligible coin(s)"
|
|
1082
|
+
in line
|
|
1083
|
+
)
|
|
1084
|
+
if num_skipped_items == PRIORITY_TX_THRESHOLD:
|
|
1085
|
+
# We skipped enough big cost items to reach `PRIORITY_TX_THRESHOLD`,
|
|
1086
|
+
# so the first from the extra 4 (the one without eligible coins) went in,
|
|
1087
|
+
# and the other 3 were skipped (they have eligible coins)
|
|
1088
|
+
assert skipped_due_to_eligible_coins == 3
|
|
1089
|
+
assert agg == SpendBundle.aggregate([sb2, extra_sbs[0]])
|
|
1090
|
+
assert additions == [sb2_addition, extra_additions[0]]
|
|
1091
|
+
assert agg.removals() == [sb2_coin, coins[num_skipped_items + 1]]
|
|
1092
|
+
elif num_skipped_items == MAX_SKIPPED_ITEMS:
|
|
1093
|
+
# We skipped enough big cost items to trigger `MAX_SKIPPED_ITEMS` so
|
|
1094
|
+
# we didn't process any of the extra items
|
|
1095
|
+
assert skipped_due_to_eligible_coins == 0
|
|
1096
|
+
assert agg == SpendBundle.aggregate([sb2])
|
|
1097
|
+
assert additions == [sb2_addition]
|
|
1098
|
+
assert agg.removals() == [sb2_coin]
|
|
1099
|
+
else:
|
|
1100
|
+
raise ValueError("num_skipped_items must be PRIORITY_TX_THRESHOLD or MAX_SKIPPED_ITEMS") # pragma: no cover
|
|
1053
1101
|
|
|
1054
1102
|
|
|
1055
1103
|
@pytest.mark.parametrize(
|
|
@@@ -1345,7 -1221,7 +1356,7 @@@ def test_dedup_info_nothing_to_do() ->
|
|
|
1345
1356
|
)
|
|
1346
1357
|
assert unique_coin_spends == sb.coin_spends
|
|
1347
1358
|
assert cost_saving == 0
|
|
1348
|
--
assert unique_additions == [Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, 1)]
|
|
1359
|
++
assert unique_additions == [Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, uint64(1))]
|
|
1349
1360
|
assert eligible_coin_spends == EligibleCoinSpends()
|
|
1350
1361
|
|
|
1351
1362
|
|
|
@@@ -1366,8 -1241,8 +1377,8 @@@ def test_dedup_info_eligible_1st_time(
|
|
|
1366
1377
|
assert unique_coin_spends == sb.coin_spends
|
|
1367
1378
|
assert cost_saving == 0
|
|
1368
1379
|
assert set(unique_additions) == {
|
|
1369
|
--
Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, 1),
|
|
1370
|
--
Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, 2),
|
|
1380
|
++
Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, uint64(1)),
|
|
1381
|
++
Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, uint64(2)),
|
|
1371
1382
|
}
|
|
1372
1383
|
assert eligible_coin_spends == EligibleCoinSpends({TEST_COIN_ID: DedupCoinSpend(solution=solution, cost=None)})
|
|
1373
1384
|
|
|
@@@ -1412,7 -1285,7 +1423,7 @@@ def test_dedup_info_eligible_2nd_time_a
|
|
|
1412
1423
|
assert unique_coin_spends == sb2.coin_spends
|
|
1413
1424
|
saved_cost = uint64(3600044)
|
|
1414
1425
|
assert cost_saving == saved_cost
|
|
1415
|
--
assert unique_additions == [Coin(TEST_COIN_ID2, IDENTITY_PUZZLE_HASH, 3)]
|
|
1426
|
++
assert unique_additions == [Coin(TEST_COIN_ID2, IDENTITY_PUZZLE_HASH, uint64(3))]
|
|
1416
1427
|
# The coin we encountered a second time has its cost and additions properly updated
|
|
1417
1428
|
# The coin we encountered for the first time gets cost None and an empty set of additions
|
|
1418
1429
|
expected_eligible_spends = EligibleCoinSpends(
|
|
@@@ -1456,7 -1328,7 +1467,7 @@@ def test_dedup_info_eligible_3rd_time_a
|
|
|
1456
1467
|
assert unique_coin_spends == sb3.coin_spends
|
|
1457
1468
|
saved_cost2 = uint64(1800044)
|
|
1458
1469
|
assert cost_saving == saved_cost + saved_cost2
|
|
1459
|
--
assert unique_additions == [Coin(TEST_COIN_ID3, IDENTITY_PUZZLE_HASH, 4)]
|
|
1470
|
++
assert unique_additions == [Coin(TEST_COIN_ID3, IDENTITY_PUZZLE_HASH, uint64(4))]
|
|
1460
1471
|
expected_eligible_spends = EligibleCoinSpends(
|
|
1461
1472
|
{
|
|
1462
1473
|
TEST_COIN_ID: DedupCoinSpend(initial_solution, saved_cost),
|
|
@@@ -1476,7 -1348,7 +1487,7 @@@ async def test_coin_spending_different_
|
|
|
1476
1487
|
the reorg code paths
|
|
1477
1488
|
"""
|
|
1478
1489
|
new_height = uint32(TEST_HEIGHT + new_height_step)
|
|
1479
|
--
coin = Coin(IDENTITY_PUZZLE_HASH, IDENTITY_PUZZLE_HASH, 100)
|
|
1490
|
++
coin = Coin(IDENTITY_PUZZLE_HASH, IDENTITY_PUZZLE_HASH, uint64(100))
|
|
1480
1491
|
coin_id = coin.name()
|
|
1481
1492
|
test_coin_records = {coin_id: CoinRecord(coin, uint32(0), uint32(0), False, uint64(0))}
|
|
1482
1493
|
|
|
@@@ -1,554 -1,0 +1,554 @@@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import dataclasses
|
|
4
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
5
|
+
|
|
6
|
+
import pytest
|
|
7
|
+
from chia_rs import AugSchemeMPL, G1Element, G2Element, PrivateKey
|
|
8
|
+
from more_itertools import partition
|
|
9
|
+
|
|
10
|
+
from chia.clvm.spend_sim import SimClient, SpendSim, sim_and_client
|
|
11
|
+
from chia.consensus.default_constants import DEFAULT_CONSTANTS
|
|
12
|
+
from chia.types.blockchain_format.coin import Coin
|
|
13
|
+
from chia.types.blockchain_format.program import Program
|
|
14
|
+
from chia.types.blockchain_format.serialized_program import SerializedProgram
|
|
15
|
+
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
16
|
+
from chia.types.coin_spend import CoinSpend, make_spend
|
|
17
|
+
from chia.types.condition_opcodes import ConditionOpcode
|
|
18
|
+
from chia.types.eligible_coin_spends import EligibleCoinSpends, UnspentLineageInfo, perform_the_fast_forward
|
|
19
|
+
from chia.types.internal_mempool_item import InternalMempoolItem
|
|
20
|
+
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
|
|
21
|
+
from chia.types.mempool_item import BundleCoinSpend
|
|
22
|
+
from chia.types.spend_bundle import SpendBundle
|
|
23
|
+
from chia.util.errors import Err
|
|
24
|
+
from chia.util.ints import uint64
|
|
25
|
+
from chia.wallet.puzzles import p2_conditions, p2_delegated_puzzle_or_hidden_puzzle
|
|
26
|
+
from chia.wallet.puzzles import singleton_top_layer_v1_1 as singleton_top_layer
|
|
27
|
+
from tests.clvm.test_puzzles import public_key_for_index, secret_exponent_for_index
|
|
28
|
+
from tests.core.mempool.test_mempool_manager import (
|
|
29
|
+
IDENTITY_PUZZLE,
|
|
30
|
+
IDENTITY_PUZZLE_HASH,
|
|
31
|
+
TEST_COIN,
|
|
32
|
+
TEST_COIN_ID,
|
|
33
|
+
TEST_HEIGHT,
|
|
34
|
+
mempool_item_from_spendbundle,
|
|
35
|
+
spend_bundle_from_conditions,
|
|
36
|
+
)
|
|
37
|
+
from tests.util.key_tool import KeyTool
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@pytest.mark.anyio
|
|
41
|
+
async def test_process_fast_forward_spends_nothing_to_do() -> None:
|
|
42
|
+
"""
|
|
43
|
+
This tests the case when we don't have an eligible coin, so there is
|
|
44
|
+
nothing to fast forward and the item remains unchanged
|
|
45
|
+
"""
|
|
46
|
+
|
|
47
|
+
async def get_unspent_lineage_info_for_puzzle_hash(_: bytes32) -> Optional[UnspentLineageInfo]:
|
|
48
|
+
assert False # pragma: no cover
|
|
49
|
+
|
|
50
|
+
conditions = [[ConditionOpcode.AGG_SIG_UNSAFE, G1Element(), IDENTITY_PUZZLE_HASH]]
|
|
51
|
+
sb = spend_bundle_from_conditions(conditions, TEST_COIN)
|
|
52
|
+
item = mempool_item_from_spendbundle(sb)
|
|
53
|
+
# This coin is not eligible for fast forward
|
|
54
|
+
assert item.bundle_coin_spends[TEST_COIN_ID].eligible_for_fast_forward is False
|
|
55
|
+
internal_mempool_item = InternalMempoolItem(
|
|
56
|
+
sb, item.npc_result, item.height_added_to_mempool, item.bundle_coin_spends
|
|
57
|
+
)
|
|
58
|
+
original_version = dataclasses.replace(internal_mempool_item)
|
|
59
|
+
eligible_coin_spends = EligibleCoinSpends()
|
|
60
|
+
await eligible_coin_spends.process_fast_forward_spends(
|
|
61
|
+
mempool_item=internal_mempool_item,
|
|
62
|
+
get_unspent_lineage_info_for_puzzle_hash=get_unspent_lineage_info_for_puzzle_hash,
|
|
63
|
+
height=TEST_HEIGHT,
|
|
64
|
+
constants=DEFAULT_CONSTANTS,
|
|
65
|
+
)
|
|
66
|
+
assert eligible_coin_spends == EligibleCoinSpends()
|
|
67
|
+
assert internal_mempool_item == original_version
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
@pytest.mark.anyio
|
|
71
|
+
async def test_process_fast_forward_spends_unknown_ff() -> None:
|
|
72
|
+
"""
|
|
73
|
+
This tests the case when we process for the first time but we are unable
|
|
74
|
+
to lookup the latest version from the DB
|
|
75
|
+
"""
|
|
76
|
+
|
|
77
|
+
async def get_unspent_lineage_info_for_puzzle_hash(puzzle_hash: bytes32) -> Optional[UnspentLineageInfo]:
|
|
78
|
+
if puzzle_hash == IDENTITY_PUZZLE_HASH:
|
|
79
|
+
return None
|
|
80
|
+
assert False # pragma: no cover
|
|
81
|
+
|
|
1
|
-
test_coin = Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, 1)
|
|
82
|
++
test_coin = Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, uint64(1))
|
|
83
|
+
conditions = [[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, 1]]
|
|
84
|
+
sb = spend_bundle_from_conditions(conditions, test_coin)
|
|
85
|
+
item = mempool_item_from_spendbundle(sb)
|
|
86
|
+
# The coin is eligible for fast forward
|
|
87
|
+
assert item.bundle_coin_spends[test_coin.name()].eligible_for_fast_forward is True
|
|
88
|
+
internal_mempool_item = InternalMempoolItem(
|
|
89
|
+
sb, item.npc_result, item.height_added_to_mempool, item.bundle_coin_spends
|
|
90
|
+
)
|
|
91
|
+
eligible_coin_spends = EligibleCoinSpends()
|
|
92
|
+
# We have no fast forward records yet, so we'll process this coin for the
|
|
93
|
+
# first time here, but the DB lookup will return None
|
|
94
|
+
with pytest.raises(ValueError, match="Cannot proceed with singleton spend fast forward."):
|
|
95
|
+
await eligible_coin_spends.process_fast_forward_spends(
|
|
96
|
+
mempool_item=internal_mempool_item,
|
|
97
|
+
get_unspent_lineage_info_for_puzzle_hash=get_unspent_lineage_info_for_puzzle_hash,
|
|
98
|
+
height=TEST_HEIGHT,
|
|
99
|
+
constants=DEFAULT_CONSTANTS,
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
@pytest.mark.anyio
|
|
104
|
+
async def test_process_fast_forward_spends_latest_unspent() -> None:
|
|
105
|
+
"""
|
|
106
|
+
This tests the case when we are the latest singleton version already, so
|
|
107
|
+
we don't need to fast forward, we just need to set the next version from
|
|
108
|
+
our additions to chain ff spends.
|
|
109
|
+
"""
|
|
2
|
-
test_amount = 3
|
|
110
|
++
test_amount = uint64(3)
|
|
111
|
+
test_coin = Coin(TEST_COIN_ID, IDENTITY_PUZZLE_HASH, test_amount)
|
|
112
|
+
test_unspent_lineage_info = UnspentLineageInfo(
|
|
113
|
+
coin_id=test_coin.name(),
|
|
114
|
+
coin_amount=test_coin.amount,
|
|
115
|
+
parent_id=test_coin.parent_coin_info,
|
|
116
|
+
parent_amount=test_coin.amount,
|
|
117
|
+
parent_parent_id=TEST_COIN_ID,
|
|
118
|
+
)
|
|
119
|
+
|
|
120
|
+
async def get_unspent_lineage_info_for_puzzle_hash(puzzle_hash: bytes32) -> Optional[UnspentLineageInfo]:
|
|
121
|
+
if puzzle_hash == IDENTITY_PUZZLE_HASH:
|
|
122
|
+
return test_unspent_lineage_info
|
|
123
|
+
assert False # pragma: no cover
|
|
124
|
+
|
|
125
|
+
# At this point, spends are considered *potentially* eligible for singleton
|
|
126
|
+
# fast forward mainly when their amount is even and they don't have conditions
|
|
127
|
+
# that disqualify them
|
|
128
|
+
conditions = [[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, test_amount - 2]]
|
|
129
|
+
sb = spend_bundle_from_conditions(conditions, test_coin)
|
|
130
|
+
item = mempool_item_from_spendbundle(sb)
|
|
131
|
+
assert item.bundle_coin_spends[test_coin.name()].eligible_for_fast_forward is True
|
|
132
|
+
internal_mempool_item = InternalMempoolItem(
|
|
133
|
+
sb, item.npc_result, item.height_added_to_mempool, item.bundle_coin_spends
|
|
134
|
+
)
|
|
135
|
+
original_version = dataclasses.replace(internal_mempool_item)
|
|
136
|
+
eligible_coin_spends = EligibleCoinSpends()
|
|
137
|
+
await eligible_coin_spends.process_fast_forward_spends(
|
|
138
|
+
mempool_item=internal_mempool_item,
|
|
139
|
+
get_unspent_lineage_info_for_puzzle_hash=get_unspent_lineage_info_for_puzzle_hash,
|
|
140
|
+
height=TEST_HEIGHT,
|
|
141
|
+
constants=DEFAULT_CONSTANTS,
|
|
142
|
+
)
|
|
143
|
+
child_coin = item.bundle_coin_spends[test_coin.name()].additions[0]
|
|
144
|
+
expected_fast_forward_spends = {
|
|
145
|
+
IDENTITY_PUZZLE_HASH: UnspentLineageInfo(
|
|
146
|
+
coin_id=child_coin.name(),
|
|
147
|
+
coin_amount=child_coin.amount,
|
|
148
|
+
parent_id=test_coin.name(),
|
|
149
|
+
parent_amount=test_coin.amount,
|
|
150
|
+
parent_parent_id=test_coin.parent_coin_info,
|
|
151
|
+
)
|
|
152
|
+
}
|
|
153
|
+
# We have set the next version from our additions to chain ff spends
|
|
154
|
+
assert eligible_coin_spends.fast_forward_spends == expected_fast_forward_spends
|
|
155
|
+
# We didn't need to fast forward the item so it stays as is
|
|
156
|
+
assert internal_mempool_item == original_version
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def test_perform_the_fast_forward() -> None:
    """
    This test attempts to spend a coin that is already spent and the current
    unspent version is its grandchild. We fast forward the test coin spend into
    a spend of that latest unspent.
    """
    test_parent_id = bytes32.from_hexstr("0x039759eda861cd44c0af6c9501300f66fe4f5de144b8ae4fc4e8da35701f38ac")
    test_ph = bytes32.from_hexstr("0x9ae0917f3ca301f934468ec60412904c0a88b232aeabf220c01ef53054e0281a")
    test_amount = uint64(1337)
    # Lineage under test: test_coin (grandparent) -> test_child_coin -> latest_unspent_coin
    test_coin = Coin(test_parent_id, test_ph, test_amount)
    test_child_coin = Coin(test_coin.name(), test_ph, test_amount)
    latest_unspent_coin = Coin(test_child_coin.name(), test_ph, test_amount)
    # This spend setup makes us eligible for fast forward so that we perform a
    # meaningful fast forward on the rust side. It was generated using the
    # singleton/child/grandchild dynamics that we have in
    # `test_singleton_fast_forward_different_block` to get a realistic test case
    test_puzzle_reveal = SerializedProgram.fromhex(
        "ff02ffff01ff02ffff01ff02ffff03ffff18ff2fff3480ffff01ff04ffff04ff20ffff04ff2fff808080ffff04ffff02ff3effff04ff0"
        "2ffff04ff05ffff04ffff02ff2affff04ff02ffff04ff27ffff04ffff02ffff03ff77ffff01ff02ff36ffff04ff02ffff04ff09ffff04"
        "ff57ffff04ffff02ff2effff04ff02ffff04ff05ff80808080ff808080808080ffff011d80ff0180ffff04ffff02ffff03ff77ffff018"
        "1b7ffff015780ff0180ff808080808080ffff04ff77ff808080808080ffff02ff3affff04ff02ffff04ff05ffff04ffff02ff0bff5f80"
        "ffff01ff8080808080808080ffff01ff088080ff0180ffff04ffff01ffffffff4947ff0233ffff0401ff0102ffffff20ff02ffff03ff0"
        "5ffff01ff02ff32ffff04ff02ffff04ff0dffff04ffff0bff3cffff0bff34ff2480ffff0bff3cffff0bff3cffff0bff34ff2c80ff0980"
        "ffff0bff3cff0bffff0bff34ff8080808080ff8080808080ffff010b80ff0180ffff02ffff03ffff22ffff09ffff0dff0580ff2280fff"
        "f09ffff0dff0b80ff2280ffff15ff17ffff0181ff8080ffff01ff0bff05ff0bff1780ffff01ff088080ff0180ff02ffff03ff0bffff01"
        "ff02ffff03ffff02ff26ffff04ff02ffff04ff13ff80808080ffff01ff02ffff03ffff20ff1780ffff01ff02ffff03ffff09ff81b3fff"
        "f01818f80ffff01ff02ff3affff04ff02ffff04ff05ffff04ff1bffff04ff34ff808080808080ffff01ff04ffff04ff23ffff04ffff02"
        "ff36ffff04ff02ffff04ff09ffff04ff53ffff04ffff02ff2effff04ff02ffff04ff05ff80808080ff808080808080ff738080ffff02f"
        "f3affff04ff02ffff04ff05ffff04ff1bffff04ff34ff8080808080808080ff0180ffff01ff088080ff0180ffff01ff04ff13ffff02ff"
        "3affff04ff02ffff04ff05ffff04ff1bffff04ff17ff8080808080808080ff0180ffff01ff02ffff03ff17ff80ffff01ff088080ff018"
        "080ff0180ffffff02ffff03ffff09ff09ff3880ffff01ff02ffff03ffff18ff2dffff010180ffff01ff0101ff8080ff0180ff8080ff01"
        "80ff0bff3cffff0bff34ff2880ffff0bff3cffff0bff3cffff0bff34ff2c80ff0580ffff0bff3cffff02ff32ffff04ff02ffff04ff07f"
        "fff04ffff0bff34ff3480ff8080808080ffff0bff34ff8080808080ffff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff2eff"
        "ff04ff02ffff04ff09ff80808080ffff02ff2effff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff02fff"
        "f03ffff21ff17ffff09ff0bff158080ffff01ff04ff30ffff04ff0bff808080ffff01ff088080ff0180ff018080ffff04ffff01ffa07f"
        "aa3253bfddd1e0decb0906b2dc6247bbc4cf608f58345d173adb63e8b47c9fffa030d940e53ed5b56fee3ae46ba5f4e59da5e2cc9242f"
        "6e482fe1f1e4d9a463639a0eff07522495060c066f66f32acc2a77e3a3e737aca8baea4d1a64ea4cdc13da9ffff04ffff010dff018080"
        "80"
    )
    test_solution = SerializedProgram.fromhex(
        "ffffa030d940e53ed5b56fee3ae46ba5f4e59da5e2cc9242f6e482fe1f1e4d9a463639ffa0c7b89cfb9abf2c4cb212a4840b37d762f4c"
        "880b8517b0dadb0c310ded24dd86dff82053980ff820539ffff80ffff01ffff33ffa0c7b89cfb9abf2c4cb212a4840b37d762f4c880b8"
        "517b0dadb0c310ded24dd86dff8205398080ff808080"
    )
    test_coin_spend = CoinSpend(test_coin, test_puzzle_reveal, test_solution)
    test_spend_data = BundleCoinSpend(test_coin_spend, False, True, [test_child_coin])
    # Lineage info pointing at the grandchild as the current latest unspent version
    test_unspent_lineage_info = UnspentLineageInfo(
        coin_id=latest_unspent_coin.name(),
        coin_amount=latest_unspent_coin.amount,
        parent_id=latest_unspent_coin.parent_coin_info,
        parent_amount=test_child_coin.amount,
        parent_parent_id=test_child_coin.parent_coin_info,
    )
    # Start from a fresh state of fast forward spends
    fast_forward_spends: Dict[bytes32, UnspentLineageInfo] = {}
    # Perform the fast forward on the test coin (the grandparent)
    new_coin_spend, patched_additions = perform_the_fast_forward(
        test_unspent_lineage_info, test_spend_data, fast_forward_spends
    )
    # Make sure the new coin we got is the grandchild (latest unspent version)
    assert new_coin_spend.coin == latest_unspent_coin
    # Make sure the puzzle reveal is intact
    assert new_coin_spend.puzzle_reveal == test_coin_spend.puzzle_reveal
    # Make sure the solution got patched
    assert new_coin_spend.solution != test_coin_spend.solution
    # Make sure the additions got patched
    expected_child_coin = Coin(latest_unspent_coin.name(), test_ph, test_amount)
    assert patched_additions == [expected_child_coin]
    # Make sure the new fast forward state got updated with the latest unspent
    # becoming the new child, with its parent being the version we just spent
    # (previously latest unspent)
    expected_unspent_lineage_info = UnspentLineageInfo(
        coin_id=expected_child_coin.name(),
        coin_amount=expected_child_coin.amount,
        parent_id=latest_unspent_coin.name(),
        parent_amount=latest_unspent_coin.amount,
        parent_parent_id=latest_unspent_coin.parent_coin_info,
    )
    assert fast_forward_spends == {test_ph: expected_unspent_lineage_info}
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def sign_delegated_puz(del_puz: Program, coin: Coin) -> G2Element:
    """Sign the delegated puzzle's tree hash for *coin* with the test synthetic key (index 1)."""
    # Derive the synthetic secret key that corresponds to the standard
    # p2_delegated_puzzle_or_hidden_puzzle puzzle built from test key index 1
    synthetic_secret_key: PrivateKey = p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_secret_key(
        PrivateKey.from_bytes(secret_exponent_for_index(1).to_bytes(32, "big")),
        p2_delegated_puzzle_or_hidden_puzzle.DEFAULT_HIDDEN_PUZZLE_HASH,
    )
    # The signed message is: delegated puzzle tree hash + coin ID + the
    # network's AGG_SIG_ME additional data
    return AugSchemeMPL.sign(
        synthetic_secret_key, (del_puz.get_tree_hash() + coin.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA)
    )
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
async def make_and_send_spend_bundle(
    sim: SpendSim,
    sim_client: SimClient,
    coin_spends: List[CoinSpend],
    is_eligible_for_ff: bool = True,
    is_launcher_coin: bool = False,
    signing_puzzle: Optional[Program] = None,
    signing_coin: Optional[Coin] = None,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
    """
    Bundle *coin_spends* into a spend bundle, push it to the simulator's
    mempool and, if it was accepted without error, farm a block to process it.

    A real signature is produced only for the launcher coin or for spends that
    are not eligible for fast forward; in those cases `signing_puzzle` and
    `signing_coin` must be provided. Returns the mempool inclusion status and
    the error, if any.
    """
    if is_launcher_coin or not is_eligible_for_ff:
        assert signing_puzzle is not None
        assert signing_coin is not None
        signature = sign_delegated_puz(signing_puzzle, signing_coin)
    else:
        # Fast-forward-eligible spends in these tests use an empty signature
        signature = G2Element()
    spend_bundle = SpendBundle(coin_spends, signature)
    status, error = await sim_client.push_tx(spend_bundle)
    if error is None:
        # Only farm a block when the bundle made it into the mempool
        await sim.farm_block()
    return status, error
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
async def get_singleton_and_remaining_coins(sim: SpendSim) -> Tuple[Coin, List[Coin]]:
    """Fetch all non-reward coins and split out the single unspent singleton from the rest."""
    all_coins = await sim.all_non_reward_coins()
    # `partition` splits the coins on the even-amount predicate; the first
    # iterable holds the singleton candidates, the second the remaining coins
    singleton_iter, rest_iter = partition(lambda c: c.amount % 2 == 0, all_coins)
    singleton_coins = list(singleton_iter)
    # There must be exactly one unspent singleton version at any point
    assert len(singleton_coins) == 1
    return singleton_coins[0], list(rest_iter)
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
def make_singleton_coin_spend(
    parent_coin_spend: CoinSpend,
    coin_to_spend: Coin,
    inner_puzzle: Program,
    inner_conditions: List[List[Any]],
    is_eve_spend: bool = False,
) -> Tuple[CoinSpend, Program]:
    """
    Build a singleton coin spend that outputs *inner_conditions*.

    Returns the coin spend together with the delegated puzzle, which the
    caller needs when the spend has to be signed.
    """
    lineage_proof = singleton_top_layer.lineage_proof_for_coinsol(parent_coin_spend)
    # (1, conditions) is the quoted delegated puzzle returning the conditions
    delegated_puzzle = Program.to((1, inner_conditions))
    inner_solution = Program.to([[], delegated_puzzle, []])
    solution = singleton_top_layer.solution_for_singleton(lineage_proof, uint64(coin_to_spend.amount), inner_solution)
    if is_eve_spend:
        # Parent here is the launcher coin
        puzzle_reveal = SerializedProgram.from_program(
            singleton_top_layer.puzzle_for_singleton(parent_coin_spend.coin.name(), inner_puzzle)
        )
    else:
        # Non-eve spends reuse the same singleton puzzle as the parent spend
        puzzle_reveal = parent_coin_spend.puzzle_reveal
    return make_spend(coin_to_spend, puzzle_reveal, solution), delegated_puzzle
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
async def prepare_singleton_eve(
    sim: SpendSim, sim_client: SimClient, is_eligible_for_ff: bool, start_amount: uint64, singleton_amount: uint64
) -> Tuple[Program, CoinSpend, Program]:
    """
    Farm a starting coin, launch a singleton from it and build (but not send)
    the eve coin spend.

    Returns the inner puzzle, the eve coin spend and the eve signing puzzle.
    """
    # Generate starting info
    key_lookup = KeyTool()
    pk = G1Element.from_bytes(public_key_for_index(1, key_lookup))
    starting_puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_pk(pk)
    if is_eligible_for_ff:
        # This program allows us to control conditions through solutions
        inner_puzzle = Program.to(13)
    else:
        inner_puzzle = starting_puzzle
    inner_puzzle_hash = inner_puzzle.get_tree_hash()
    # Get our starting standard coin created
    await sim.farm_block(starting_puzzle.get_tree_hash())
    records = await sim_client.get_coin_records_by_puzzle_hash(starting_puzzle.get_tree_hash())
    starting_coin = records[0].coin
    # Launching
    conditions, launcher_coin_spend = singleton_top_layer.launch_conditions_and_coinsol(
        coin=starting_coin, inner_puzzle=inner_puzzle, comment=[], amount=start_amount
    )
    # Keep a remaining coin with an even amount
    conditions.append(
        Program.to([ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, starting_coin.amount - start_amount - 1])
    )
    # Create a solution for standard transaction
    delegated_puzzle = p2_conditions.puzzle_for_conditions(conditions)
    full_solution = p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions(conditions)
    starting_coin_spend = make_spend(starting_coin, starting_puzzle, full_solution)
    # The launcher spend must be signed, so pass the signing material along
    await make_and_send_spend_bundle(
        sim,
        sim_client,
        [starting_coin_spend, launcher_coin_spend],
        is_eligible_for_ff,
        is_launcher_coin=True,
        signing_puzzle=delegated_puzzle,
        signing_coin=starting_coin,
    )
    eve_coin, _ = await get_singleton_and_remaining_coins(sim)
    inner_conditions = [[ConditionOpcode.CREATE_COIN, inner_puzzle_hash, singleton_amount]]
    eve_coin_spend, eve_signing_puzzle = make_singleton_coin_spend(
        parent_coin_spend=launcher_coin_spend,
        coin_to_spend=eve_coin,
        inner_puzzle=inner_puzzle,
        inner_conditions=inner_conditions,
        is_eve_spend=True,
    )
    return inner_puzzle, eve_coin_spend, eve_signing_puzzle
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
async def prepare_and_test_singleton(
    sim: SpendSim, sim_client: SimClient, is_eligible_for_ff: bool, start_amount: uint64, singleton_amount: uint64
) -> Tuple[Coin, CoinSpend, Program, Coin]:
    """
    Launch a singleton, spend its eve coin and verify the coin store's
    unspent lineage info along the way.

    Returns the unspent singleton, the eve coin spend, the inner puzzle and
    the remaining (non-singleton) coin.
    """
    inner_puzzle, eve_coin_spend, eve_signing_puzzle = await prepare_singleton_eve(
        sim, sim_client, is_eligible_for_ff, start_amount, singleton_amount
    )
    # At this point we don't have any unspent singleton
    singleton_puzzle_hash = eve_coin_spend.coin.puzzle_hash
    unspent_lineage_info = await sim_client.service.coin_store.get_unspent_lineage_info_for_puzzle_hash(
        singleton_puzzle_hash
    )
    assert unspent_lineage_info is None
    eve_coin = eve_coin_spend.coin
    await make_and_send_spend_bundle(
        sim, sim_client, [eve_coin_spend], is_eligible_for_ff, signing_puzzle=eve_signing_puzzle, signing_coin=eve_coin
    )
    # Now we spent eve and we have an unspent singleton that we can test with
    singleton, [remaining_coin] = await get_singleton_and_remaining_coins(sim)
    assert singleton.amount == singleton_amount
    singleton_puzzle_hash = eve_coin.puzzle_hash
    unspent_lineage_info = await sim_client.service.coin_store.get_unspent_lineage_info_for_puzzle_hash(
        singleton_puzzle_hash
    )
    # The latest unspent is the singleton, its parent is the eve coin
    assert unspent_lineage_info == UnspentLineageInfo(
        coin_id=singleton.name(),
        coin_amount=singleton.amount,
        parent_id=eve_coin.name(),
        parent_amount=eve_coin.amount,
        parent_parent_id=eve_coin.parent_coin_info,
    )
    return singleton, eve_coin_spend, inner_puzzle, remaining_coin
|
|
382
|
+
|
|
383
|
+
|
|
384
|
+
@pytest.mark.anyio
@pytest.mark.parametrize("is_eligible_for_ff", [True, False])
async def test_singleton_fast_forward_different_block(is_eligible_for_ff: bool) -> None:
    """
    This tests uses the `is_eligible_for_ff` parameter to cover both when a
    singleton is eligible for fast forward and when it's not, as we attempt to
    spend an earlier version of it, in a different block, and watch it either
    get properly fast forwarded to the latest unspent (when it's eligible) or
    get correctly rejected as a double spend (when it's not eligible)
    """
    START_AMOUNT = uint64(1337)
    # We're decrementing the next iteration's amount for testing purposes
    SINGLETON_AMOUNT = uint64(1335)
    # We're incrementing the next iteration's amount for testing purposes
    SINGLETON_CHILD_AMOUNT = uint64(1339)
    async with sim_and_client() as (sim, sim_client):
        singleton, eve_coin_spend, inner_puzzle, remaining_coin = await prepare_and_test_singleton(
            sim, sim_client, is_eligible_for_ff, START_AMOUNT, SINGLETON_AMOUNT
        )
        # Let's spend this first version, to create a bigger singleton child
        singleton_puzzle_hash = eve_coin_spend.coin.puzzle_hash
        inner_puzzle_hash = inner_puzzle.get_tree_hash()
        inner_conditions = [
            [ConditionOpcode.AGG_SIG_UNSAFE, G1Element(), IDENTITY_PUZZLE_HASH],
            [ConditionOpcode.CREATE_COIN, inner_puzzle_hash, SINGLETON_CHILD_AMOUNT],
        ]
        singleton_coin_spend, singleton_signing_puzzle = make_singleton_coin_spend(
            eve_coin_spend, singleton, inner_puzzle, inner_conditions
        )
        # Spend also a remaining coin for balance, as we're increasing the singleton amount
        diff_to_balance = SINGLETON_CHILD_AMOUNT - SINGLETON_AMOUNT
        remaining_spend_solution = SerializedProgram.from_program(
            Program.to([[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, remaining_coin.amount - diff_to_balance]])
        )
        remaining_coin_spend = CoinSpend(remaining_coin, IDENTITY_PUZZLE, remaining_spend_solution)
        await make_and_send_spend_bundle(
            sim,
            sim_client,
            [remaining_coin_spend, singleton_coin_spend],
            is_eligible_for_ff,
            signing_puzzle=singleton_signing_puzzle,
            signing_coin=singleton,
        )
        unspent_lineage_info = await sim_client.service.coin_store.get_unspent_lineage_info_for_puzzle_hash(
            singleton_puzzle_hash
        )
        singleton_child, [remaining_coin] = await get_singleton_and_remaining_coins(sim)
        assert singleton_child.amount == SINGLETON_CHILD_AMOUNT
        # The latest unspent is now the singleton child
        assert unspent_lineage_info == UnspentLineageInfo(
            coin_id=singleton_child.name(),
            coin_amount=singleton_child.amount,
            parent_id=singleton.name(),
            parent_amount=singleton.amount,
            parent_parent_id=eve_coin_spend.coin.name(),
        )
        # Now let's spend the first version again (despite being already spent by now)
        remaining_spend_solution = SerializedProgram.from_program(
            Program.to([[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, remaining_coin.amount - diff_to_balance]])
        )
        remaining_coin_spend = CoinSpend(remaining_coin, IDENTITY_PUZZLE, remaining_spend_solution)
        status, error = await make_and_send_spend_bundle(
            sim,
            sim_client,
            [remaining_coin_spend, singleton_coin_spend],
            is_eligible_for_ff,
            signing_puzzle=singleton_signing_puzzle,
            signing_coin=singleton,
        )
        if is_eligible_for_ff:
            # Instead of rejecting this as double spend, we perform a fast forward,
            # spending the singleton child as a result, and creating the latest
            # version which is the grandchild in this scenario
            assert status == MempoolInclusionStatus.SUCCESS
            assert error is None
            unspent_lineage_info = await sim_client.service.coin_store.get_unspent_lineage_info_for_puzzle_hash(
                singleton_puzzle_hash
            )
            singleton_grandchild, [remaining_coin] = await get_singleton_and_remaining_coins(sim)
            assert unspent_lineage_info == UnspentLineageInfo(
                coin_id=singleton_grandchild.name(),
                coin_amount=singleton_grandchild.amount,
                parent_id=singleton_child.name(),
                parent_amount=singleton_child.amount,
                parent_parent_id=singleton.name(),
            )
        else:
            # As this singleton is not eligible for fast forward, attempting to
            # spend one of its earlier versions is considered a double spend
            assert status == MempoolInclusionStatus.FAILED
            assert error == Err.DOUBLE_SPEND
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
@pytest.mark.anyio
async def test_singleton_fast_forward_same_block() -> None:
    """
    This tests covers sending multiple transactions that spend an already spent
    singleton version, all in the same block, to make sure they get properly
    fast forwarded and chained down to a latest unspent version
    """
    START_AMOUNT = uint64(1337)
    # We're decrementing the next iteration's amount for testing purposes
    SINGLETON_AMOUNT = uint64(1335)
    # We're incrementing the next iteration's amount for testing purposes
    SINGLETON_CHILD_AMOUNT = uint64(1339)
    async with sim_and_client() as (sim, sim_client):
        singleton, eve_coin_spend, inner_puzzle, remaining_coin = await prepare_and_test_singleton(
            sim, sim_client, True, START_AMOUNT, SINGLETON_AMOUNT
        )
        # Let's spend this first version, to create a bigger singleton child
        singleton_puzzle_hash = eve_coin_spend.coin.puzzle_hash
        inner_puzzle_hash = inner_puzzle.get_tree_hash()
        inner_conditions = [
            [ConditionOpcode.AGG_SIG_UNSAFE, G1Element(), IDENTITY_PUZZLE_HASH],
            [ConditionOpcode.CREATE_COIN, inner_puzzle_hash, SINGLETON_CHILD_AMOUNT],
        ]
        singleton_coin_spend, _ = make_singleton_coin_spend(eve_coin_spend, singleton, inner_puzzle, inner_conditions)
        # Spend also a remaining coin for balance, as we're increasing the singleton amount
        diff_to_balance = SINGLETON_CHILD_AMOUNT - SINGLETON_AMOUNT
        remaining_spend_solution = SerializedProgram.from_program(
            Program.to([[ConditionOpcode.CREATE_COIN, IDENTITY_PUZZLE_HASH, remaining_coin.amount - diff_to_balance]])
        )
        remaining_coin_spend = CoinSpend(remaining_coin, IDENTITY_PUZZLE, remaining_spend_solution)
        await make_and_send_spend_bundle(sim, sim_client, [remaining_coin_spend, singleton_coin_spend])
        unspent_lineage_info = await sim_client.service.coin_store.get_unspent_lineage_info_for_puzzle_hash(
            singleton_puzzle_hash
        )
        singleton_child, [remaining_coin] = await get_singleton_and_remaining_coins(sim)
        assert singleton_child.amount == SINGLETON_CHILD_AMOUNT
        # The latest unspent is now the singleton child
        assert unspent_lineage_info == UnspentLineageInfo(
            coin_id=singleton_child.name(),
            coin_amount=singleton_child.amount,
            parent_id=singleton.name(),
            parent_amount=singleton.amount,
            parent_parent_id=eve_coin_spend.coin.name(),
        )
        # Now let's send 3 arbitrary spends of the already spent singleton in
        # one block. They should all properly fast forward
        random_amounts = [21, 17, 11]
        signature = G2Element()
        for i in range(3):
            # This cost adjustment allows us to maintain the order of spends due to fee per
            # cost and amounts dynamics
            cost_factor = (i + 1) * 5
            inner_conditions = [
                [ConditionOpcode.AGG_SIG_UNSAFE, G1Element(), IDENTITY_PUZZLE_HASH] for _ in range(cost_factor)
            ]
            inner_conditions.append([ConditionOpcode.CREATE_COIN, inner_puzzle_hash, random_amounts[i]])
            singleton_coin_spend, _ = make_singleton_coin_spend(
                eve_coin_spend, singleton, inner_puzzle, inner_conditions
            )
            status, error = await sim_client.push_tx(SpendBundle([singleton_coin_spend], signature))
            assert error is None
            assert status == MempoolInclusionStatus.SUCCESS

        # Farm a block to process all these spend bundles
        await sim.farm_block()
        unspent_lineage_info = await sim_client.service.coin_store.get_unspent_lineage_info_for_puzzle_hash(
            singleton_puzzle_hash
        )
        latest_singleton, [remaining_coin] = await get_singleton_and_remaining_coins(sim)
        assert unspent_lineage_info is not None
        # The unspent coin ID should reflect the latest version
        assert unspent_lineage_info.coin_id == latest_singleton.name()
        # The latest version should have the last random amount
        assert latest_singleton.amount == random_amounts[-1]
        # The unspent coin amount should reflect the latest version
        assert unspent_lineage_info.coin_amount == latest_singleton.amount
        # The unspent parent ID should reflect the latest version's parent
        assert unspent_lineage_info.parent_id == latest_singleton.parent_coin_info
        # The one before it should have the second last random amount
        assert unspent_lineage_info.parent_amount == random_amounts[-2]
|
|
@@@ -146,7 -146,7 +146,7 @@@ async def test_mempool_mode(softfork_he
|
|
|
146
146
|
binutils.assemble(
|
|
147
147
|
f"(q ((0x3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa {disassembly} 300"
|
|
148
148
|
f" (() (q . (({unknown_opcode} '00000000000000000000000000000000' 0x0cbba106e000))) ()))))"
|
|
149
|
--
).as_bin()
|
|
149
|
++
).as_bin()
|
|
150
150
|
)
|
|
151
151
|
generator = BlockGenerator(program, [], [])
|
|
152
152
|
npc_result: NPCResult = get_name_puzzle_conditions(
|
|
@@@ -169,7 -169,7 +169,7 @@@
|
|
|
169
169
|
coin = Coin(
|
|
170
170
|
bytes32.fromhex("3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa"),
|
|
171
171
|
bytes32.fromhex("14947eb0e69ee8fc8279190fc2d38cb4bbb61ba28f1a270cfd643a0e8d759576"),
|
|
172
|
--
300,
|
|
172
|
++
uint64(300),
|
|
173
173
|
)
|
|
174
174
|
spend_info = get_puzzle_and_solution_for_coin(generator, coin, softfork_height, bt.constants)
|
|
175
175
|
assert spend_info.puzzle.to_program() == puzzle
|
|
@@@ -183,9 -183,9 +183,7 @@@ async def test_clvm_mempool_mode(softfo
|
|
|
183
183
|
# if-condition, that depends on executing an unknown operator
|
|
184
184
|
# ("0xfe"). In mempool mode, this should fail, but in non-mempool
|
|
185
185
|
# mode, the unknown operator should be treated as if it returns ().
|
|
186
|
--
program = SerializedProgram.from_bytes(
|
|
187
|
--
binutils.assemble(f"(i (0xfe (q . 0)) (q . ()) {disassembly})").as_bin() # type: ignore[no-untyped-call]
|
|
188
|
--
)
|
|
186
|
++
program = SerializedProgram.from_bytes(binutils.assemble(f"(i (0xfe (q . 0)) (q . ()) {disassembly})").as_bin())
|
|
189
187
|
generator = BlockGenerator(program, [], [])
|
|
190
188
|
npc_result: NPCResult = get_name_puzzle_conditions(
|
|
191
189
|
generator,
|
|
@@@ -236,9 -236,9 +234,7 @@@ async def test_clvm_max_cost(softfork_h
|
|
|
236
234
|
# mode, the unknown operator should be treated as if it returns ().
|
|
237
235
|
# the CLVM program has a cost of 391969
|
|
238
236
|
program = SerializedProgram.from_bytes(
|
|
239
|
--
binutils.assemble(
|
|
240
|
--
f"(i (softfork (q . 10000000)) (q . ()) {disassembly})"
|
|
241
|
--
).as_bin() # type: ignore[no-untyped-call]
|
|
237
|
++
binutils.assemble(f"(i (softfork (q . 10000000)) (q . ()) {disassembly})").as_bin()
|
|
242
238
|
)
|
|
243
239
|
|
|
244
240
|
# ensure we fail if the program exceeds the cost
|
|
@@@ -273,7 -272,7 +269,7 @@@ async def test_standard_tx(benchmark_ru
|
|
|
273
269
|
conditions = binutils.assemble(
|
|
274
270
|
"((51 0x699eca24f2b6f4b25b16f7a418d0dc4fc5fce3b9145aecdda184158927738e3e 10)"
|
|
275
271
|
" (51 0x847bb2385534070c39a39cc5dfdc7b35e2db472dc0ab10ab4dec157a2178adbf 0x00cbba106df6))"
|
|
276
|
--
)
|
|
272
|
++
)
|
|
277
273
|
solution_program = SerializedProgram.from_bytes(
|
|
278
274
|
bytes(p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions(conditions))
|
|
279
275
|
)
|
|
@@@ -4,17 -4,17 +4,23 @@@ import itertool
|
|
|
4
4
|
import random
|
|
5
5
|
from hashlib import sha256
|
|
6
6
|
from itertools import permutations
|
|
7
|
--
from
|
|
7
|
++
from random import Random
|
|
8
|
++
from typing import List, Optional, Tuple
|
|
8
9
|
|
|
9
10
|
import pytest
|
|
10
|
--
from chia_rs import compute_merkle_set_root
|
|
11
|
++
from chia_rs import Coin, compute_merkle_set_root
|
|
11
12
|
|
|
13
|
++
from chia.simulator.block_tools import BlockTools
|
|
12
14
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
15
|
++
from chia.util.hash import std_hash
|
|
16
|
++
from chia.util.ints import uint64
|
|
13
17
|
from chia.util.merkle_set import MerkleSet, confirm_included_already_hashed
|
|
18
|
++
from chia.util.misc import to_batches
|
|
19
|
++
from chia.wallet.util.wallet_sync_utils import validate_additions, validate_removals
|
|
14
20
|
|
|
15
21
|
|
|
16
22
|
@pytest.mark.anyio
|
|
17
|
--
async def test_basics(bt):
|
|
23
|
++
async def test_basics(bt: BlockTools) -> None:
|
|
18
24
|
num_blocks = 20
|
|
19
25
|
blocks = bt.get_consecutive_blocks(num_blocks)
|
|
20
26
|
|
|
@@@ -50,7 -50,7 +56,7 @@@ def hashdown(buf: bytes) -> bytes32
|
|
|
50
56
|
|
|
51
57
|
|
|
52
58
|
@pytest.mark.anyio
|
|
53
|
--
async def test_merkle_set_invalid_hash_size():
|
|
59
|
++
async def test_merkle_set_invalid_hash_size() -> None:
|
|
54
60
|
merkle_set = MerkleSet()
|
|
55
61
|
|
|
56
62
|
# this is too large
|
|
@@@ -76,7 -76,7 +82,7 @@@
|
|
|
76
82
|
|
|
77
83
|
|
|
78
84
|
@pytest.mark.anyio
|
|
79
|
--
async def test_merkle_set_1():
|
|
85
|
++
async def test_merkle_set_1() -> None:
|
|
80
86
|
a = bytes32([0x80] + [0] * 31)
|
|
81
87
|
merkle_set = MerkleSet()
|
|
82
88
|
merkle_set.add_already_hashed(a)
|
|
@@@ -85,7 -85,7 +91,7 @@@
|
|
|
85
91
|
|
|
86
92
|
|
|
87
93
|
@pytest.mark.anyio
|
|
88
|
--
async def test_merkle_set_duplicate():
|
|
94
|
++
async def test_merkle_set_duplicate() -> None:
|
|
89
95
|
a = bytes32([0x80] + [0] * 31)
|
|
90
96
|
merkle_set = MerkleSet()
|
|
91
97
|
merkle_set.add_already_hashed(a)
|
|
@@@ -95,14 -95,14 +101,14 @@@
|
|
|
95
101
|
|
|
96
102
|
|
|
97
103
|
@pytest.mark.anyio
|
|
98
|
--
async def test_merkle_set_0():
|
|
104
|
++
async def test_merkle_set_0() -> None:
|
|
99
105
|
merkle_set = MerkleSet()
|
|
100
106
|
assert merkle_set.get_root() == bytes32(compute_merkle_set_root([]))
|
|
101
107
|
assert merkle_set.get_root() == bytes32([0] * 32)
|
|
102
108
|
|
|
103
109
|
|
|
104
110
|
@pytest.mark.anyio
|
|
105
|
--
async def test_merkle_set_2():
|
|
111
|
++
async def test_merkle_set_2() -> None:
|
|
106
112
|
a = bytes32([0x80] + [0] * 31)
|
|
107
113
|
b = bytes32([0x70] + [0] * 31)
|
|
108
114
|
merkle_set = MerkleSet()
|
|
@@@ -113,7 -113,7 +119,7 @@@
|
|
|
113
119
|
|
|
114
120
|
|
|
115
121
|
@pytest.mark.anyio
|
|
116
|
--
async def test_merkle_set_2_reverse():
|
|
122
|
++
async def test_merkle_set_2_reverse() -> None:
|
|
117
123
|
a = bytes32([0x80] + [0] * 31)
|
|
118
124
|
b = bytes32([0x70] + [0] * 31)
|
|
119
125
|
merkle_set = MerkleSet()
|
|
@@@ -124,7 -124,7 +130,7 @@@
|
|
|
124
130
|
|
|
125
131
|
|
|
126
132
|
@pytest.mark.anyio
|
|
127
|
--
async def test_merkle_set_3():
|
|
133
|
++
async def test_merkle_set_3() -> None:
|
|
128
134
|
a = bytes32([0x80] + [0] * 31)
|
|
129
135
|
b = bytes32([0x70] + [0] * 31)
|
|
130
136
|
c = bytes32([0x71] + [0] * 31)
|
|
@@@ -145,7 -145,7 +151,7 @@@
|
|
|
145
151
|
|
|
146
152
|
|
|
147
153
|
@pytest.mark.anyio
|
|
148
|
--
async def test_merkle_set_4():
|
|
154
|
++
async def test_merkle_set_4() -> None:
|
|
149
155
|
a = bytes32([0x80] + [0] * 31)
|
|
150
156
|
b = bytes32([0x70] + [0] * 31)
|
|
151
157
|
c = bytes32([0x71] + [0] * 31)
|
|
@@@ -167,7 -167,7 +173,7 @@@
|
|
|
167
173
|
|
|
168
174
|
|
|
169
175
|
@pytest.mark.anyio
|
|
170
|
--
async def test_merkle_set_5():
|
|
176
|
++
async def test_merkle_set_5() -> None:
|
|
171
177
|
BLANK = bytes32([0] * 32)
|
|
172
178
|
|
|
173
179
|
a = bytes32([0x58] + [0] * 31)
|
|
@@@ -216,7 -216,7 +222,7 @@@
|
|
|
216
222
|
|
|
217
223
|
|
|
218
224
|
@pytest.mark.anyio
|
|
219
|
--
async def test_merkle_left_edge():
|
|
225
|
++
async def test_merkle_left_edge() -> None:
|
|
220
226
|
BLANK = bytes32([0] * 32)
|
|
221
227
|
a = bytes32([0x80] + [0] * 31)
|
|
222
228
|
b = bytes32([0] * 31 + [1])
|
|
@@@ -257,7 -257,7 +263,7 @@@
|
|
|
257
263
|
|
|
258
264
|
|
|
259
265
|
@pytest.mark.anyio
|
|
260
|
--
async def test_merkle_right_edge():
|
|
266
|
++
async def test_merkle_right_edge() -> None:
|
|
261
267
|
BLANK = bytes32([0] * 32)
|
|
262
268
|
a = bytes32([0x40] + [0] * 31)
|
|
263
269
|
b = bytes32([0xFF] * 31 + [0xFF])
|
|
@@@ -306,7 -306,7 +312,7 @@@ def rand_hash(rng: random.Random) -> by
|
|
|
306
312
|
|
|
307
313
|
@pytest.mark.anyio
|
|
308
314
|
@pytest.mark.skip("This test is expensive and has already convinced us there are no discrepancies")
|
|
309
|
--
async def test_merkle_set_random_regression():
|
|
315
|
++
async def test_merkle_set_random_regression() -> None:
|
|
310
316
|
rng = random.Random()
|
|
311
317
|
rng.seed(123456)
|
|
312
318
|
for i in range(100):
|
|
@@@ -323,3 -323,3 +329,44 @@@
|
|
|
323
329
|
python_root = merkle_set.get_root()
|
|
324
330
|
rust_root = bytes32(compute_merkle_set_root(values))
|
|
325
331
|
assert rust_root == python_root
|
|
332
|
++
|
|
333
|
++
|
|
334
|
++
def make_test_coins(n: int, rng: Random) -> List[Coin]:
|
|
335
|
++
return [Coin(bytes32.random(rng), bytes32.random(rng), uint64(rng.randint(0, 10000000))) for i in range(n)]
|
|
336
|
++
|
|
337
|
++
|
|
338
|
++
@pytest.mark.parametrize("num_coins", [0, 1, 2, 100, 1337])
|
|
339
|
++
def test_validate_removals_full_list(num_coins: int, seeded_random: Random) -> None:
|
|
340
|
++
# when we have all the removals, we don't need to include a proof, because
|
|
341
|
++
# the root can be computed by all the removals
|
|
342
|
++
coins = make_test_coins(num_coins, seeded_random)
|
|
343
|
++
|
|
344
|
++
removals_merkle_set = MerkleSet()
|
|
345
|
++
coin_map: List[Tuple[bytes32, Optional[Coin]]] = []
|
|
346
|
++
for coin in coins:
|
|
347
|
++
removals_merkle_set.add_already_hashed(coin.name())
|
|
348
|
++
coin_map.append((coin.name(), coin))
|
|
349
|
++
removals_root = removals_merkle_set.get_root()
|
|
350
|
++
|
|
351
|
++
assert validate_removals(coin_map, None, removals_root) is True
|
|
352
|
++
|
|
353
|
++
|
|
354
|
++
@pytest.mark.parametrize("num_coins", [0, 1, 2, 100, 1337])
|
|
355
|
++
@pytest.mark.parametrize("batch_size", [1, 2, 10])
|
|
356
|
++
def test_validate_additions_full_list(num_coins: int, batch_size: int, seeded_random: Random) -> None:
|
|
357
|
++
# when we have all the removals, we don't need to include a proof, because
|
|
358
|
++
# the root can be computed by all the removals
|
|
359
|
++
coins = make_test_coins(num_coins, seeded_random)
|
|
360
|
++
|
|
361
|
++
additions_merkle_set = MerkleSet()
|
|
362
|
++
additions: List[Tuple[bytes32, List[Coin]]] = []
|
|
363
|
++
for coin_batch in to_batches(coins, batch_size):
|
|
364
|
++
puzzle_hash = bytes32.random(seeded_random)
|
|
365
|
++
additions.append((puzzle_hash, coin_batch.entries))
|
|
366
|
++
additions_merkle_set.add_already_hashed(puzzle_hash)
|
|
367
|
++
additions_merkle_set.add_already_hashed(
|
|
368
|
++
std_hash(b"".join(sorted([coin.name() for coin in coin_batch.entries], reverse=True)))
|
|
369
|
++
)
|
|
370
|
++
additions_root = additions_merkle_set.get_root()
|
|
371
|
++
|
|
372
|
++
assert validate_additions(additions, None, additions_root) is True
|
|
@@@ -1,49 -1,0 +1,49 @@@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import List
|
|
4
|
+
|
|
5
|
+
from clvm.SExp import CastableType
|
|
6
|
+
from clvm_tools import binutils
|
|
7
|
+
|
|
8
|
+
from chia.types.blockchain_format.program import Program
|
|
9
|
+
from chia.types.blockchain_format.serialized_program import SerializedProgram
|
|
10
|
+
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
11
|
+
from chia.util.ints import uint32, uint64
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def program_roundtrip(o: CastableType) -> None:
|
|
15
|
+
prg1 = Program.to(o)
|
|
16
|
+
prg2 = SerializedProgram.to(o)
|
|
17
|
+
prg3 = SerializedProgram.from_program(prg1)
|
|
18
|
+
prg4 = SerializedProgram.from_bytes(prg1.as_bin())
|
|
19
|
+
prg5 = prg2.to_program()
|
|
20
|
+
|
|
21
|
+
assert bytes(prg1) == bytes(prg2)
|
|
22
|
+
assert bytes(prg1) == bytes(prg3)
|
|
23
|
+
assert bytes(prg1) == bytes(prg4)
|
|
24
|
+
assert bytes(prg1) == bytes(prg5)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def test_serialized_program_to() -> None:
|
|
28
|
+
prg = "(q ((0x0101010101010101010101010101010101010101010101010101010101010101 80 123 (() (q . ())))))" # noqa
|
|
29
|
+
tests: List[CastableType] = [
|
|
30
|
+
0,
|
|
31
|
+
1,
|
|
32
|
+
(1, 2),
|
|
33
|
+
[0, 1, 2],
|
|
34
|
+
Program.to([1, 2, 3]),
|
|
35
|
+
SerializedProgram.to([1, 2, 3]),
|
|
36
|
+
b"123",
|
|
1
|
-
binutils.assemble(prg),
|
|
37
|
++
binutils.assemble(prg),
|
|
38
|
+
[b"1", b"2", b"3"],
|
|
39
|
+
(b"1", (b"2", b"3")),
|
|
40
|
+
None,
|
|
41
|
+
-24,
|
|
42
|
+
bytes32.fromhex("0" * 64),
|
|
43
|
+
bytes.fromhex("0" * 6),
|
|
44
|
+
uint32(123),
|
|
45
|
+
uint64(123123),
|
|
46
|
+
]
|
|
47
|
+
|
|
48
|
+
for t in tests:
|
|
49
|
+
program_roundtrip(t)
|
|
@@@ -652,7 -652,7 +652,7 @@@ def test_ambiguous_deserialization_prog
|
|
|
652
652
|
class TestClassProgram(Streamable):
|
|
653
653
|
a: Program
|
|
654
654
|
|
|
655
|
--
program = Program.to(binutils.assemble("()"))
|
|
655
|
++
program = Program.to(binutils.assemble("()"))
|
|
656
656
|
|
|
657
657
|
TestClassProgram.from_bytes(bytes(program))
|
|
658
658
|
|
|
@@@ -1,0 -1,0 +1,19 @@@
|
|
|
1
|
++
from __future__ import annotations
|
|
2
|
++
|
|
3
|
++
from typing import TYPE_CHECKING, Callable, Optional
|
|
4
|
++
|
|
5
|
++
if TYPE_CHECKING:
|
|
6
|
++
from tests.util.misc import TestId
|
|
7
|
++
|
|
8
|
++
# NOTE: Do not just put any useful thing here. This is specifically for making
|
|
9
|
++
# fixture values globally available during tests. In _most_ cases fixtures
|
|
10
|
++
# should be directly requested using normal mechanisms. Very little should
|
|
11
|
++
# be put here.
|
|
12
|
++
|
|
13
|
++
# NOTE: When using this module do not import the attributes directly. Rather, import
|
|
14
|
++
# something like `from tests import ether`. Importing attributes directly will
|
|
15
|
++
# result in you likely getting the default `None` values since they are not
|
|
16
|
++
# populated until tests are running.
|
|
17
|
++
|
|
18
|
++
record_property: Optional[Callable[[str, object], None]] = None
|
|
19
|
++
test_id: Optional[TestId] = None
|
|
@@@ -1,470 -1,0 +1,470 @@@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import base64
|
|
5
|
+
import dataclasses
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
from os.path import dirname
|
|
9
|
+
from typing import Any, List, Optional, Tuple, Union, cast
|
|
10
|
+
|
|
11
|
+
import pytest
|
|
12
|
+
from chia_rs import G1Element
|
|
13
|
+
from pytest_mock import MockerFixture
|
|
14
|
+
|
|
15
|
+
from chia.consensus.blockchain import AddBlockResult
|
|
16
|
+
from chia.consensus.multiprocess_validation import PreValidationResult
|
|
17
|
+
from chia.farmer.farmer import Farmer, calculate_harvester_fee_quality
|
|
18
|
+
from chia.farmer.farmer_api import FarmerAPI
|
|
19
|
+
from chia.full_node.full_node import FullNode
|
|
20
|
+
from chia.full_node.full_node_api import FullNodeAPI
|
|
21
|
+
from chia.harvester.harvester import Harvester
|
|
22
|
+
from chia.harvester.harvester_api import HarvesterAPI
|
|
23
|
+
from chia.protocols import farmer_protocol, full_node_protocol, harvester_protocol, timelord_protocol
|
|
24
|
+
from chia.protocols.farmer_protocol import RequestSignedValues
|
|
25
|
+
from chia.protocols.harvester_protocol import ProofOfSpaceFeeInfo, RespondSignatures, SigningDataKind
|
|
26
|
+
from chia.protocols.protocol_message_types import ProtocolMessageTypes
|
|
27
|
+
from chia.server.outbound_message import Message, NodeType, make_msg
|
|
28
|
+
from chia.server.server import ChiaServer
|
|
29
|
+
from chia.server.ws_connection import WSChiaConnection
|
|
30
|
+
from chia.simulator.block_tools import BlockTools
|
|
31
|
+
from chia.types.aliases import FarmerService, FullNodeService, HarvesterService, SimulatorFullNodeService
|
|
32
|
+
from chia.types.blockchain_format.classgroup import ClassgroupElement
|
|
33
|
+
from chia.types.blockchain_format.foliage import FoliageBlockData, FoliageTransactionBlock
|
|
34
|
+
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
|
|
35
|
+
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
36
|
+
from chia.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot
|
|
37
|
+
from chia.types.full_block import FullBlock
|
|
38
|
+
from chia.types.peer_info import UnresolvedPeerInfo
|
|
39
|
+
from chia.util.bech32m import decode_puzzle_hash
|
|
40
|
+
from chia.util.hash import std_hash
|
|
41
|
+
from chia.util.ints import uint8, uint32, uint64
|
|
42
|
+
from tests.util.time_out_assert import time_out_assert
|
|
43
|
+
|
|
44
|
+
SPType = Union[timelord_protocol.NewEndOfSubSlotVDF, timelord_protocol.NewSignagePointVDF]
|
|
45
|
+
SPList = List[SPType]
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@pytest.mark.anyio
|
|
49
|
+
async def test_harvester_receive_source_signing_data(
|
|
50
|
+
farmer_harvester_2_simulators_zero_bits_plot_filter: Tuple[
|
|
51
|
+
FarmerService,
|
|
52
|
+
HarvesterService,
|
|
53
|
+
Union[FullNodeService, SimulatorFullNodeService],
|
|
54
|
+
Union[FullNodeService, SimulatorFullNodeService],
|
|
55
|
+
BlockTools,
|
|
56
|
+
],
|
|
57
|
+
mocker: MockerFixture,
|
|
58
|
+
) -> None:
|
|
59
|
+
"""
|
|
60
|
+
Tests that the source data for the signatures requests sent to the
|
|
61
|
+
harvester are indeed available and also tests that overrides of
|
|
62
|
+
the farmer reward address, as specified by the harvester, are respected.
|
|
63
|
+
See: CHIP-22: https://github.com/Chia-Network/chips/pull/88
|
|
64
|
+
"""
|
|
65
|
+
(
|
|
66
|
+
farmer_service,
|
|
67
|
+
harvester_service,
|
|
68
|
+
full_node_service_1,
|
|
69
|
+
full_node_service_2,
|
|
70
|
+
_,
|
|
71
|
+
) = farmer_harvester_2_simulators_zero_bits_plot_filter
|
|
72
|
+
|
|
73
|
+
farmer: Farmer = farmer_service._node
|
|
74
|
+
harvester: Harvester = harvester_service._node
|
|
75
|
+
full_node_1: FullNode = full_node_service_1._node
|
|
76
|
+
full_node_2: FullNode = full_node_service_2._node
|
|
77
|
+
|
|
78
|
+
# Connect peers to each other
|
|
79
|
+
farmer_service.add_peer(
|
|
80
|
+
UnresolvedPeerInfo(str(full_node_service_2.self_hostname), full_node_service_2._server.get_port())
|
|
81
|
+
)
|
|
82
|
+
full_node_service_2.add_peer(
|
|
83
|
+
UnresolvedPeerInfo(str(full_node_service_1.self_hostname), full_node_service_1._server.get_port())
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
await wait_until_node_type_connected(farmer.server, NodeType.FULL_NODE)
|
|
87
|
+
await wait_until_node_type_connected(farmer.server, NodeType.HARVESTER) # Should already be connected
|
|
88
|
+
await wait_until_node_type_connected(full_node_1.server, NodeType.FULL_NODE)
|
|
89
|
+
|
|
90
|
+
# Prepare test data
|
|
91
|
+
blocks: List[FullBlock]
|
|
92
|
+
signage_points: SPList
|
|
93
|
+
|
|
94
|
+
(blocks, signage_points) = load_test_data()
|
|
95
|
+
assert len(blocks) == 1
|
|
96
|
+
|
|
97
|
+
# Inject full node with a pre-existing block to skip initial genesis sub-slot
|
|
98
|
+
# so that we have blocks generated that have our farmer reward address, instead
|
|
99
|
+
# of the GENESIS_PRE_FARM_FARMER_PUZZLE_HASH.
|
|
100
|
+
await add_test_blocks_into_full_node(blocks, full_node_2)
|
|
101
|
+
|
|
102
|
+
validated_foliage_data = False
|
|
103
|
+
validated_foliage_transaction = False
|
|
104
|
+
validated_cc_vdf = False
|
|
105
|
+
validated_rc_vdf = False
|
|
106
|
+
validated_sub_slot_cc = False
|
|
107
|
+
validated_sub_slot_rc = False
|
|
108
|
+
# validated_partial = False # Not covered currently. See comment in validate_harvester_request_signatures
|
|
109
|
+
|
|
110
|
+
finished_validating_data = False
|
|
111
|
+
farmer_reward_address = decode_puzzle_hash("txch1psqeaw0h244v5sy2r4se8pheyl62n8778zl6t5e7dep0xch9xfkqhx2mej")
|
|
112
|
+
|
|
113
|
+
async def intercept_harvester_request_signatures(*args: Any) -> Message:
|
|
114
|
+
request: harvester_protocol.RequestSignatures = harvester_protocol.RequestSignatures.from_bytes(args[0])
|
|
115
|
+
nonlocal harvester
|
|
116
|
+
nonlocal farmer_reward_address
|
|
117
|
+
|
|
118
|
+
validate_harvester_request_signatures(request)
|
|
119
|
+
result_msg: Optional[Message] = await HarvesterAPI.request_signatures(
|
|
120
|
+
cast(HarvesterAPI, harvester.server.api), request
|
|
121
|
+
)
|
|
122
|
+
assert result_msg is not None
|
|
123
|
+
|
|
124
|
+
# Inject overridden farmer reward address
|
|
125
|
+
response: RespondSignatures = dataclasses.replace(
|
|
126
|
+
RespondSignatures.from_bytes(result_msg.data), farmer_reward_address_override=farmer_reward_address
|
|
127
|
+
)
|
|
128
|
+
|
|
129
|
+
return make_msg(ProtocolMessageTypes.respond_signatures, response)
|
|
130
|
+
|
|
131
|
+
def validate_harvester_request_signatures(request: harvester_protocol.RequestSignatures) -> None:
|
|
132
|
+
nonlocal full_node_2
|
|
133
|
+
nonlocal farmer_reward_address
|
|
134
|
+
nonlocal validated_foliage_data
|
|
135
|
+
nonlocal validated_foliage_transaction
|
|
136
|
+
nonlocal validated_cc_vdf
|
|
137
|
+
nonlocal validated_rc_vdf
|
|
138
|
+
nonlocal validated_sub_slot_cc
|
|
139
|
+
nonlocal validated_sub_slot_rc
|
|
140
|
+
nonlocal finished_validating_data
|
|
141
|
+
|
|
142
|
+
assert request.message_data is not None
|
|
143
|
+
assert len(request.messages) > 0
|
|
144
|
+
assert len(request.messages) == len(request.message_data)
|
|
145
|
+
|
|
146
|
+
for hash, src in zip(request.messages, request.message_data):
|
|
147
|
+
assert hash
|
|
148
|
+
assert src
|
|
149
|
+
|
|
150
|
+
data: Optional[
|
|
151
|
+
Union[
|
|
152
|
+
FoliageBlockData,
|
|
153
|
+
FoliageTransactionBlock,
|
|
154
|
+
ClassgroupElement,
|
|
155
|
+
ChallengeChainSubSlot,
|
|
156
|
+
RewardChainSubSlot,
|
|
157
|
+
]
|
|
158
|
+
] = None
|
|
159
|
+
if src.kind == uint8(SigningDataKind.FOLIAGE_BLOCK_DATA):
|
|
160
|
+
data = FoliageBlockData.from_bytes(src.data)
|
|
161
|
+
assert (
|
|
162
|
+
data.farmer_reward_puzzle_hash == farmer_reward_address
|
|
163
|
+
or data.farmer_reward_puzzle_hash
|
|
164
|
+
== bytes32(full_node_2.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH)
|
|
165
|
+
)
|
|
166
|
+
if data.farmer_reward_puzzle_hash == farmer_reward_address:
|
|
167
|
+
validated_foliage_data = True
|
|
168
|
+
elif src.kind == uint8(SigningDataKind.FOLIAGE_TRANSACTION_BLOCK):
|
|
169
|
+
data = FoliageTransactionBlock.from_bytes(src.data)
|
|
170
|
+
validated_foliage_transaction = True
|
|
171
|
+
elif src.kind == uint8(SigningDataKind.CHALLENGE_CHAIN_VDF):
|
|
172
|
+
data = ClassgroupElement.from_bytes(src.data)
|
|
173
|
+
validated_cc_vdf = True
|
|
174
|
+
elif src.kind == uint8(SigningDataKind.REWARD_CHAIN_VDF):
|
|
175
|
+
data = ClassgroupElement.from_bytes(src.data)
|
|
176
|
+
validated_rc_vdf = True
|
|
177
|
+
elif src.kind == uint8(SigningDataKind.CHALLENGE_CHAIN_SUB_SLOT):
|
|
178
|
+
data = ChallengeChainSubSlot.from_bytes(src.data)
|
|
179
|
+
validated_sub_slot_cc = True
|
|
180
|
+
elif src.kind == uint8(SigningDataKind.REWARD_CHAIN_SUB_SLOT):
|
|
181
|
+
data = RewardChainSubSlot.from_bytes(src.data)
|
|
182
|
+
validated_sub_slot_rc = True
|
|
183
|
+
# #NOTE: This data type is difficult to trigger, so it is
|
|
184
|
+
# not tested for the time being.
|
|
185
|
+
# data = PostPartialPayload.from_bytes(src.data)
|
|
186
|
+
# validated_partial = True
|
|
187
|
+
# elif src.kind == uint8(SigningDataKind.PARTIAL):
|
|
188
|
+
# pass
|
|
189
|
+
|
|
190
|
+
finished_validating_data = (
|
|
191
|
+
validated_foliage_data
|
|
192
|
+
and validated_foliage_transaction
|
|
193
|
+
and validated_cc_vdf
|
|
194
|
+
and validated_rc_vdf
|
|
195
|
+
and validated_sub_slot_cc
|
|
196
|
+
and validated_sub_slot_rc
|
|
197
|
+
)
|
|
198
|
+
|
|
199
|
+
assert data is not None
|
|
200
|
+
data_hash = data.get_hash()
|
|
201
|
+
assert data_hash == hash
|
|
202
|
+
|
|
203
|
+
async def intercept_farmer_new_proof_of_space(*args: Any) -> None:
|
|
204
|
+
nonlocal farmer
|
|
205
|
+
nonlocal farmer_reward_address
|
|
206
|
+
|
|
207
|
+
request: harvester_protocol.NewProofOfSpace = dataclasses.replace(
|
|
208
|
+
harvester_protocol.NewProofOfSpace.from_bytes(args[0]), farmer_reward_address_override=farmer_reward_address
|
|
209
|
+
)
|
|
210
|
+
peer: WSChiaConnection = args[1]
|
|
211
|
+
|
|
212
|
+
await FarmerAPI.new_proof_of_space(farmer.server.api, request, peer)
|
|
213
|
+
|
|
214
|
+
async def intercept_farmer_request_signed_values(*args: Any) -> Optional[Message]:
|
|
215
|
+
nonlocal farmer
|
|
216
|
+
nonlocal farmer_reward_address
|
|
217
|
+
nonlocal full_node_2
|
|
218
|
+
|
|
219
|
+
request: RequestSignedValues = RequestSignedValues.from_bytes(args[0])
|
|
220
|
+
|
|
221
|
+
# Ensure the FullNode included the source data for the signatures
|
|
222
|
+
assert request.foliage_block_data
|
|
223
|
+
assert request.foliage_block_data.get_hash() == request.foliage_block_data_hash
|
|
224
|
+
assert request.foliage_transaction_block_data
|
|
225
|
+
assert request.foliage_transaction_block_data.get_hash() == request.foliage_transaction_block_hash
|
|
226
|
+
|
|
227
|
+
assert (
|
|
228
|
+
request.foliage_block_data.farmer_reward_puzzle_hash == farmer_reward_address
|
|
229
|
+
or request.foliage_block_data.farmer_reward_puzzle_hash
|
|
230
|
+
== bytes32(full_node_2.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH)
|
|
231
|
+
)
|
|
232
|
+
|
|
233
|
+
return await FarmerAPI.request_signed_values(farmer.server.api, request)
|
|
234
|
+
|
|
235
|
+
mocker.patch.object(farmer.server.api, "request_signed_values", side_effect=intercept_farmer_request_signed_values)
|
|
236
|
+
mocker.patch.object(farmer.server.api, "new_proof_of_space", side_effect=intercept_farmer_new_proof_of_space)
|
|
237
|
+
mocker.patch.object(harvester.server.api, "request_signatures", side_effect=intercept_harvester_request_signatures)
|
|
238
|
+
|
|
239
|
+
# Start injecting signage points
|
|
240
|
+
await inject_signage_points(signage_points, full_node_1, full_node_2)
|
|
241
|
+
|
|
242
|
+
# Wait until test finishes
|
|
243
|
+
def did_finished_validating_data() -> bool:
|
|
244
|
+
return finished_validating_data
|
|
245
|
+
|
|
246
|
+
await time_out_assert(60 * 60, did_finished_validating_data, True)
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
@pytest.mark.anyio
|
|
250
|
+
async def test_harvester_fee_convention(
|
|
251
|
+
farmer_harvester_2_simulators_zero_bits_plot_filter: Tuple[
|
|
252
|
+
FarmerService,
|
|
253
|
+
HarvesterService,
|
|
254
|
+
Union[FullNodeService, SimulatorFullNodeService],
|
|
255
|
+
Union[FullNodeService, SimulatorFullNodeService],
|
|
256
|
+
BlockTools,
|
|
257
|
+
],
|
|
258
|
+
caplog: pytest.LogCaptureFixture,
|
|
259
|
+
) -> None:
|
|
260
|
+
"""
|
|
261
|
+
Tests fee convention specified in CHIP-22: https://github.com/Chia-Network/chips/pull/88
|
|
262
|
+
"""
|
|
263
|
+
(
|
|
264
|
+
farmer_service,
|
|
265
|
+
_,
|
|
266
|
+
_,
|
|
267
|
+
_,
|
|
268
|
+
_,
|
|
269
|
+
) = farmer_harvester_2_simulators_zero_bits_plot_filter
|
|
270
|
+
|
|
271
|
+
caplog.set_level(logging.DEBUG)
|
|
272
|
+
farmer: Farmer = farmer_service._node
|
|
273
|
+
(sp, pos) = prepare_sp_and_pos_for_fee_test(1)
|
|
274
|
+
farmer.notify_farmer_reward_taken_by_harvester_as_fee(sp, pos)
|
|
275
|
+
|
|
276
|
+
assert await scan_log_for_message(caplog, "Fee threshold passed for challenge")
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
@pytest.mark.anyio
|
|
280
|
+
async def test_harvester_fee_invalid_convention(
|
|
281
|
+
farmer_harvester_2_simulators_zero_bits_plot_filter: Tuple[
|
|
282
|
+
FarmerService,
|
|
283
|
+
HarvesterService,
|
|
284
|
+
Union[FullNodeService, SimulatorFullNodeService],
|
|
285
|
+
Union[FullNodeService, SimulatorFullNodeService],
|
|
286
|
+
BlockTools,
|
|
287
|
+
],
|
|
288
|
+
caplog: pytest.LogCaptureFixture,
|
|
289
|
+
) -> None:
|
|
290
|
+
"""
|
|
291
|
+
Tests that logs are properly emitted when an invalid free threshold is specified
|
|
292
|
+
given the fee convention from CHIP-22: https://github.com/Chia-Network/chips/pull/88
|
|
293
|
+
"""
|
|
294
|
+
(
|
|
295
|
+
farmer_service,
|
|
296
|
+
_,
|
|
297
|
+
_,
|
|
298
|
+
_,
|
|
299
|
+
_,
|
|
300
|
+
) = farmer_harvester_2_simulators_zero_bits_plot_filter
|
|
301
|
+
|
|
302
|
+
farmer: Farmer = farmer_service._node
|
|
303
|
+
caplog.set_level(logging.DEBUG)
|
|
304
|
+
|
|
305
|
+
(sp, pos) = prepare_sp_and_pos_for_fee_test(-1)
|
|
306
|
+
farmer.notify_farmer_reward_taken_by_harvester_as_fee(sp, pos)
|
|
307
|
+
farmer.log.propagate
|
|
308
|
+
|
|
309
|
+
assert await scan_log_for_message(caplog, "Invalid fee threshold for challenge")
|
|
310
|
+
|
|
311
|
+
|
|
312
|
+
def prepare_sp_and_pos_for_fee_test(
|
|
313
|
+
fee_threshold_offset: int,
|
|
314
|
+
) -> Tuple[farmer_protocol.NewSignagePoint, harvester_protocol.NewProofOfSpace]:
|
|
315
|
+
proof = std_hash(b"1")
|
|
316
|
+
challenge = std_hash(b"1")
|
|
317
|
+
|
|
318
|
+
fee_quality = calculate_harvester_fee_quality(proof, challenge)
|
|
319
|
+
|
|
320
|
+
pubkey = G1Element.from_bytes(
|
|
321
|
+
bytes.fromhex(
|
|
322
|
+
"80a836a74b077cabaca7a76d1c3c9f269f7f3a8f2fa196a65ee8953eb81274eb8b7328d474982617af5a0fe71b47e9b8"
|
|
323
|
+
)
|
|
324
|
+
)
|
|
325
|
+
|
|
326
|
+
# Send some fake data to the framer
|
|
327
|
+
sp = farmer_protocol.NewSignagePoint(
|
|
328
|
+
challenge_hash=challenge,
|
|
329
|
+
challenge_chain_sp=challenge,
|
|
330
|
+
reward_chain_sp=challenge,
|
|
331
|
+
difficulty=uint64(0),
|
|
332
|
+
sub_slot_iters=uint64(0),
|
|
333
|
+
signage_point_index=uint8(0),
|
|
334
|
+
peak_height=uint32(1),
|
|
335
|
+
)
|
|
336
|
+
|
|
337
|
+
pos = harvester_protocol.NewProofOfSpace(
|
|
338
|
+
challenge_hash=challenge,
|
|
339
|
+
sp_hash=challenge,
|
|
340
|
+
plot_identifier="foo.plot",
|
|
341
|
+
proof=ProofOfSpace(
|
|
342
|
+
challenge=challenge,
|
|
343
|
+
pool_public_key=None,
|
|
344
|
+
pool_contract_puzzle_hash=None,
|
|
345
|
+
plot_public_key=pubkey,
|
|
1
|
-
size=len(proof),
|
|
346
|
++
size=uint8(len(proof)),
|
|
347
|
+
proof=proof,
|
|
348
|
+
),
|
|
349
|
+
signage_point_index=uint8(0),
|
|
350
|
+
include_source_signature_data=False,
|
|
351
|
+
farmer_reward_address_override=decode_puzzle_hash(
|
|
352
|
+
"txch1psqeaw0h244v5sy2r4se8pheyl62n8778zl6t5e7dep0xch9xfkqhx2mej"
|
|
353
|
+
),
|
|
354
|
+
fee_info=ProofOfSpaceFeeInfo(
|
|
355
|
+
# Apply threshold offset to make the fee either pass or fail
|
|
356
|
+
applied_fee_threshold=uint32(fee_quality + fee_threshold_offset)
|
|
357
|
+
),
|
|
358
|
+
)
|
|
359
|
+
|
|
360
|
+
return (sp, pos)
|
|
361
|
+
|
|
362
|
+
|
|
363
|
+
async def scan_log_for_message(caplog: pytest.LogCaptureFixture, find_message: str) -> bool: # pragma: no cover
|
|
364
|
+
log_text_len = 0
|
|
365
|
+
|
|
366
|
+
def log_has_new_text() -> bool:
|
|
367
|
+
nonlocal caplog
|
|
368
|
+
nonlocal log_text_len
|
|
369
|
+
|
|
370
|
+
text_len = len(caplog.text)
|
|
371
|
+
if text_len > log_text_len:
|
|
372
|
+
log_text_len = text_len
|
|
373
|
+
return True
|
|
374
|
+
|
|
375
|
+
return False
|
|
376
|
+
|
|
377
|
+
await time_out_assert(60, log_has_new_text, True)
|
|
378
|
+
|
|
379
|
+
log_text = caplog.text
|
|
380
|
+
find_index = 0
|
|
381
|
+
fail_count = 0
|
|
382
|
+
max_fails = 10
|
|
383
|
+
|
|
384
|
+
for _ in range(max_fails):
|
|
385
|
+
index = log_text.find(find_message, find_index)
|
|
386
|
+
if index >= 0:
|
|
387
|
+
return True
|
|
388
|
+
|
|
389
|
+
fail_count += 1
|
|
390
|
+
assert fail_count < max_fails
|
|
391
|
+
await time_out_assert(10, log_has_new_text, True)
|
|
392
|
+
log_text = caplog.text
|
|
393
|
+
|
|
394
|
+
return False
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
async def wait_until_node_type_connected(server: ChiaServer, node_type: NodeType) -> WSChiaConnection:
|
|
398
|
+
while True:
|
|
399
|
+
for peer in server.all_connections.values():
|
|
400
|
+
if peer.connection_type == node_type.value:
|
|
401
|
+
return peer
|
|
402
|
+
await asyncio.sleep(1)
|
|
403
|
+
|
|
404
|
+
|
|
405
|
+
def decode_sp(
|
|
406
|
+
is_sub_slot: bool, sp64: str
|
|
407
|
+
) -> Union[timelord_protocol.NewEndOfSubSlotVDF, timelord_protocol.NewSignagePointVDF]:
|
|
408
|
+
sp_bytes = base64.b64decode(sp64)
|
|
409
|
+
if is_sub_slot:
|
|
410
|
+
return timelord_protocol.NewEndOfSubSlotVDF.from_bytes(sp_bytes)
|
|
411
|
+
|
|
412
|
+
return timelord_protocol.NewSignagePointVDF.from_bytes(sp_bytes)
|
|
413
|
+
|
|
414
|
+
|
|
415
|
+
async def add_test_blocks_into_full_node(blocks: List[FullBlock], full_node: FullNode) -> None:
|
|
416
|
+
# Inject full node with a pre-existing block to skip initial genesis sub-slot
|
|
417
|
+
# so that we have blocks generated that have our farmer reward address, instead
|
|
418
|
+
# of the GENESIS_PRE_FARM_FARMER_PUZZLE_HASH.
|
|
419
|
+
pre_validation_results: List[PreValidationResult] = await full_node.blockchain.pre_validate_blocks_multiprocessing(
|
|
420
|
+
blocks, {}, validate_signatures=True
|
|
421
|
+
)
|
|
422
|
+
assert pre_validation_results is not None and len(pre_validation_results) == len(blocks)
|
|
423
|
+
for i in range(len(blocks)):
|
|
424
|
+
r, _, _ = await full_node.blockchain.add_block(blocks[i], pre_validation_results[i])
|
|
425
|
+
assert r == AddBlockResult.NEW_PEAK
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
async def inject_signage_points(signage_points: SPList, full_node_1: FullNode, full_node_2: FullNode) -> None:
|
|
429
|
+
full_node_2_peer_1 = [
|
|
430
|
+
n for n in list(full_node_2.server.all_connections.values()) if n.local_type == NodeType.FULL_NODE
|
|
431
|
+
][0]
|
|
432
|
+
|
|
433
|
+
api2 = cast(FullNodeAPI, full_node_2.server.api)
|
|
434
|
+
|
|
435
|
+
for i, sp in enumerate(signage_points):
|
|
436
|
+
req: Union[full_node_protocol.RespondEndOfSubSlot, full_node_protocol.RespondSignagePoint]
|
|
437
|
+
|
|
438
|
+
if isinstance(sp, timelord_protocol.NewEndOfSubSlotVDF):
|
|
439
|
+
full_node_1.log.info(f"Injecting SP for end of sub-slot @ {i}")
|
|
440
|
+
|
|
441
|
+
req = full_node_protocol.RespondEndOfSubSlot(sp.end_of_sub_slot_bundle)
|
|
442
|
+
await api2.respond_end_of_sub_slot(req, full_node_2_peer_1)
|
|
443
|
+
else:
|
|
444
|
+
full_node_1.log.info(f"Injecting SP @ {i}: index: {sp.index_from_challenge}")
|
|
445
|
+
|
|
446
|
+
req = full_node_protocol.RespondSignagePoint(
|
|
447
|
+
sp.index_from_challenge,
|
|
448
|
+
sp.challenge_chain_sp_vdf,
|
|
449
|
+
sp.challenge_chain_sp_proof,
|
|
450
|
+
sp.reward_chain_sp_vdf,
|
|
451
|
+
sp.reward_chain_sp_proof,
|
|
452
|
+
)
|
|
453
|
+
|
|
454
|
+
await api2.respond_signage_point(req, full_node_2_peer_1)
|
|
455
|
+
|
|
456
|
+
|
|
457
|
+
# Pre-generated test signage points encoded as base64.
|
|
458
|
+
# Each element contains either a NewSignagePointVDF or a NewEndOfSubSlotVDF.
|
|
459
|
+
# If the first element of the tuple is True, then it is as NewEndOfSubSlotVDF.
|
|
460
|
+
# A FullBlock is also included which is infused already in the chain so
|
|
461
|
+
# that the next NewEndOfSubSlotVDF is valid.
|
|
462
|
+
# This block has to be added to the test FullNode before injecting the signage points.
|
|
463
|
+
def load_test_data() -> Tuple[List[FullBlock], SPList]:
|
|
464
|
+
file_path: str = dirname(__file__) + "/test_third_party_harvesters_data.json"
|
|
465
|
+
with open(file_path) as f:
|
|
466
|
+
data = json.load(f)
|
|
467
|
+
blocks = [FullBlock.from_bytes(base64.b64decode(cast(str, data["block"])))]
|
|
468
|
+
|
|
469
|
+
signage_points = [decode_sp(cast(bool, sp[0]), cast(str, sp[1])) for sp in data["signage_points"]]
|
|
470
|
+
return (blocks, signage_points)
|
|
@@@ -217,7 -217,7 +217,7 @@@ class TestDecompression
|
|
|
217
217
|
|
|
218
218
|
def test_decompress_cse(self) -> None:
|
|
219
219
|
"""Decompress a single CSE / CoinSpendEntry"""
|
|
220
|
--
cse0 = binutils.assemble(
|
|
220
|
++
cse0 = binutils.assemble(
|
|
221
221
|
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
|
|
222
222
|
)
|
|
223
223
|
cost, out = DECOMPRESS_CSE.run_with_cost(
|
|
@@@ -228,7 -228,7 +228,7 @@@
|
|
|
228
228
|
print(out)
|
|
229
229
|
|
|
230
230
|
def test_decompress_cse_with_prefix(self) -> None:
|
|
231
|
--
cse0 = binutils.assemble(
|
|
231
|
++
cse0 = binutils.assemble(
|
|
232
232
|
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
|
|
233
233
|
)
|
|
234
234
|
|
|
@@@ -245,10 -245,10 +245,10 @@@
|
|
|
245
245
|
|
|
246
246
|
def test_block_program_zero(self) -> None:
|
|
247
247
|
"Decompress a list of CSEs"
|
|
248
|
--
cse1 = binutils.assemble(
|
|
248
|
++
cse1 = binutils.assemble(
|
|
249
249
|
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
|
|
250
250
|
)
|
|
251
|
--
cse2 = binutils.assemble(
|
|
251
|
++
cse2 = binutils.assemble(
|
|
252
252
|
"""
|
|
253
253
|
(
|
|
254
254
|
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
|
|
@@@ -286,10 -286,10 +286,10 @@@
|
|
|
286
286
|
print(out)
|
|
287
287
|
|
|
288
288
|
def test_block_program_zero_with_curry(self) -> None:
|
|
289
|
--
cse1 = binutils.assemble(
|
|
289
|
++
cse1 = binutils.assemble(
|
|
290
290
|
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
|
|
291
291
|
)
|
|
292
|
--
cse2 = binutils.assemble(
|
|
292
|
++
cse2 = binutils.assemble(
|
|
293
293
|
"""
|
|
294
294
|
(
|
|
295
295
|
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
|
|
@@@ -46,7 -46,7 +46,7 @@@ GENERATOR_CODE = ""
|
|
|
46
46
|
COMPILED_GENERATOR_CODE = bytes(Program.to(compile_clvm_text(GENERATOR_CODE, []))) # type: ignore[no-untyped-call]
|
|
47
47
|
|
|
48
48
|
FIRST_GENERATOR = Program.to(
|
|
49
|
--
binutils.assemble(
|
|
49
|
++
binutils.assemble(
|
|
50
50
|
"""
|
|
51
51
|
((0x0000000000000000000000000000000000000000000000000000000000000000 1 50000
|
|
52
52
|
((51 0x0000000000000000000000000000000000000000000000000000000000000001 500))
|
|
@@@ -54,7 -54,7 +54,7 @@@
|
|
|
54
54
|
)
|
|
55
55
|
).as_bin()
|
|
56
56
|
|
|
57
|
--
SECOND_GENERATOR = Program.to(binutils.assemble("(extra data for block)")).as_bin()
|
|
57
|
++
SECOND_GENERATOR = Program.to(binutils.assemble("(extra data for block)")).as_bin()
|
|
58
58
|
|
|
59
59
|
|
|
60
60
|
def to_sp(sexp: bytes) -> SerializedProgram:
|
|
@@@ -1,240 -1,240 +1,0 @@@
|
|
|
1
|
--
from __future__ import annotations
|
|
2
|
--
|
|
3
|
--
import json
|
|
4
|
--
import random
|
|
5
|
--
import re
|
|
6
|
--
from collections import defaultdict
|
|
7
|
--
from dataclasses import dataclass, field
|
|
8
|
--
from pathlib import Path
|
|
9
|
--
from statistics import StatisticsError, mean, stdev
|
|
10
|
--
from typing import Any, Dict, List, Set, TextIO, Tuple, final
|
|
11
|
--
|
|
12
|
--
import click
|
|
13
|
--
import lxml.etree
|
|
14
|
--
|
|
15
|
--
|
|
16
|
--
@final
|
|
17
|
--
@dataclass(frozen=True, order=True)
|
|
18
|
--
class Result:
|
|
19
|
--
file_path: Path
|
|
20
|
--
test_path: Tuple[str, ...]
|
|
21
|
--
label: str
|
|
22
|
--
line: int = field(compare=False)
|
|
23
|
--
durations: Tuple[float, ...] = field(compare=False)
|
|
24
|
--
limit: float = field(compare=False)
|
|
25
|
--
|
|
26
|
--
def marshal(self) -> Dict[str, Any]:
|
|
27
|
--
return {
|
|
28
|
--
"file_path": self.file_path.as_posix(),
|
|
29
|
--
"test_path": self.test_path,
|
|
30
|
--
"label": self.label,
|
|
31
|
--
"duration": {
|
|
32
|
--
"all": self.durations,
|
|
33
|
--
"min": min(self.durations),
|
|
34
|
--
"max": max(self.durations),
|
|
35
|
--
"mean": mean(self.durations),
|
|
36
|
--
},
|
|
37
|
--
}
|
|
38
|
--
|
|
39
|
--
def link(self, prefix: str, line_separator: str) -> str:
|
|
40
|
--
return f"{prefix}{self.file_path.as_posix()}{line_separator}{self.line}"
|
|
41
|
--
|
|
42
|
--
|
|
43
|
--
def sub(matchobj: re.Match[str]) -> str:
|
|
44
|
--
result = ""
|
|
45
|
--
|
|
46
|
--
if matchobj.group("start") == "[":
|
|
47
|
--
result += "["
|
|
48
|
--
|
|
49
|
--
if matchobj.group("start") == matchobj.group("end") == "-":
|
|
50
|
--
result += "-"
|
|
51
|
--
|
|
52
|
--
if matchobj.group("end") == "]":
|
|
53
|
--
result += "]"
|
|
54
|
--
|
|
55
|
--
return result
|
|
56
|
--
|
|
57
|
--
|
|
58
|
--
@click.command(context_settings={"help_option_names": ["-h", "--help"]})
|
|
59
|
--
@click.option(
|
|
60
|
--
"--xml",
|
|
61
|
--
"xml_file",
|
|
62
|
--
required=True,
|
|
63
|
--
type=click.File(),
|
|
64
|
--
help="The benchmarks JUnit XML results file",
|
|
65
|
--
)
|
|
66
|
--
@click.option(
|
|
67
|
--
"--link-prefix",
|
|
68
|
--
default="",
|
|
69
|
--
help="Prefix for output links such as for web links instead of IDE links",
|
|
70
|
--
show_default=True,
|
|
71
|
--
)
|
|
72
|
--
@click.option(
|
|
73
|
--
"--link-line-separator",
|
|
74
|
--
default=":",
|
|
75
|
--
help="The separator between the path and the line number, such as : for local links and #L on GitHub",
|
|
76
|
--
show_default=True,
|
|
77
|
--
)
|
|
78
|
--
@click.option(
|
|
79
|
--
"--output",
|
|
80
|
--
default="-",
|
|
81
|
--
type=click.File(mode="w", encoding="utf-8", lazy=True, atomic=True),
|
|
82
|
--
help="Output file, - for stdout",
|
|
83
|
--
show_default=True,
|
|
84
|
--
)
|
|
85
|
--
# TODO: anything but this pattern for output types
|
|
86
|
--
@click.option(
|
|
87
|
--
"--markdown/--no-markdown",
|
|
88
|
--
help="Use markdown as output format",
|
|
89
|
--
show_default=True,
|
|
90
|
--
)
|
|
91
|
--
@click.option(
|
|
92
|
--
"--percent-margin",
|
|
93
|
--
default=15,
|
|
94
|
--
type=int,
|
|
95
|
--
help="Highlight results with maximums within this percent of the limit",
|
|
96
|
--
show_default=True,
|
|
97
|
--
)
|
|
98
|
--
@click.option(
|
|
99
|
--
"--randomoji/--determimoji",
|
|
100
|
--
help="🍿",
|
|
101
|
--
show_default=True,
|
|
102
|
--
)
|
|
103
|
--
def main(
|
|
104
|
--
xml_file: TextIO,
|
|
105
|
--
link_prefix: str,
|
|
106
|
--
link_line_separator: str,
|
|
107
|
--
output: TextIO,
|
|
108
|
--
markdown: bool,
|
|
109
|
--
percent_margin: int,
|
|
110
|
--
randomoji: bool,
|
|
111
|
--
) -> None:
|
|
112
|
--
tree = lxml.etree.parse(xml_file)
|
|
113
|
--
root = tree.getroot()
|
|
114
|
--
benchmarks = root.find("testsuite[@name='benchmarks']")
|
|
115
|
--
|
|
116
|
--
# raw_durations: defaultdict[Tuple[str, ...], List[Result]] = defaultdict(list)
|
|
117
|
--
|
|
118
|
--
cases_by_test_path: defaultdict[Tuple[str, ...], List[lxml.etree.Element]] = defaultdict(list)
|
|
119
|
--
for case in benchmarks.findall("testcase"):
|
|
120
|
--
raw_name = case.attrib["name"]
|
|
121
|
--
name = re.sub(r"(?P<start>[-\[])benchmark_repeat\d{3}(?P<end>[-\])])", sub, raw_name)
|
|
122
|
--
# TODO: seems to duplicate the class and function name, though not the parametrizations
|
|
123
|
--
test_path = (
|
|
124
|
--
*case.attrib["classname"].split("."),
|
|
125
|
--
name,
|
|
126
|
--
)
|
|
127
|
--
cases_by_test_path[test_path].append(case)
|
|
128
|
--
|
|
129
|
--
results: List[Result] = []
|
|
130
|
--
for test_path, cases in cases_by_test_path.items():
|
|
131
|
--
labels: Set[str] = set()
|
|
132
|
--
for case in cases:
|
|
133
|
--
properties = case.find("properties")
|
|
134
|
--
labels.update(property.attrib["name"].partition(":")[2] for property in properties)
|
|
135
|
--
|
|
136
|
--
for label in labels:
|
|
137
|
--
query = "properties/property[@name='{property}:{label}']"
|
|
138
|
--
|
|
139
|
--
durations = [
|
|
140
|
--
float(property.attrib["value"])
|
|
141
|
--
for case in cases
|
|
142
|
--
for property in case.xpath(query.format(label=label, property="duration"))
|
|
143
|
--
]
|
|
144
|
--
|
|
145
|
--
a_case = cases[0]
|
|
146
|
--
|
|
147
|
--
file_path: Path
|
|
148
|
--
[file_path] = [
|
|
149
|
--
Path(property.attrib["value"]) for property in a_case.xpath(query.format(label=label, property="path"))
|
|
150
|
--
]
|
|
151
|
--
|
|
152
|
--
line: int
|
|
153
|
--
[line] = [
|
|
154
|
--
int(property.attrib["value"]) for property in a_case.xpath(query.format(label=label, property="line"))
|
|
155
|
--
]
|
|
156
|
--
|
|
157
|
--
limit: float
|
|
158
|
--
[limit] = [
|
|
159
|
--
float(property.attrib["value"])
|
|
160
|
--
for property in a_case.xpath(query.format(label=label, property="limit"))
|
|
161
|
--
]
|
|
162
|
--
|
|
163
|
--
results.append(
|
|
164
|
--
Result(
|
|
165
|
--
file_path=file_path,
|
|
166
|
--
test_path=test_path,
|
|
167
|
--
line=line,
|
|
168
|
--
label=label,
|
|
169
|
--
durations=tuple(durations),
|
|
170
|
--
limit=limit,
|
|
171
|
--
)
|
|
172
|
--
)
|
|
173
|
--
|
|
174
|
--
if not markdown:
|
|
175
|
--
for result in results:
|
|
176
|
--
link = result.link(prefix=link_prefix, line_separator=link_line_separator)
|
|
177
|
--
dumped = json.dumps(result.marshal())
|
|
178
|
--
output.write(f"{link} {dumped}\n")
|
|
179
|
--
else:
|
|
180
|
--
output.write("| Test | 🍿 | Mean | Max | 3σ | Limit | Percent |\n")
|
|
181
|
--
output.write("| --- | --- | --- | --- | --- | --- | --- |\n")
|
|
182
|
--
for result in sorted(results):
|
|
183
|
--
link_url = result.link(prefix=link_prefix, line_separator=link_line_separator)
|
|
184
|
--
|
|
185
|
--
mean_str = "-"
|
|
186
|
--
three_sigma_str = "-"
|
|
187
|
--
if len(result.durations) > 1:
|
|
188
|
--
durations_mean = mean(result.durations)
|
|
189
|
--
mean_str = f"{durations_mean:.3f} s"
|
|
190
|
--
|
|
191
|
--
try:
|
|
192
|
--
three_sigma_str = f"{durations_mean + 3 * stdev(result.durations):.3f} s"
|
|
193
|
--
except StatisticsError:
|
|
194
|
--
pass
|
|
195
|
--
|
|
196
|
--
durations_max = max(result.durations)
|
|
197
|
--
max_str = f"{durations_max:.3f} s"
|
|
198
|
--
|
|
199
|
--
limit_str = f"{result.limit:.3f} s"
|
|
200
|
--
|
|
201
|
--
percent = 100 * durations_max / result.limit
|
|
202
|
--
if percent >= 100:
|
|
203
|
--
# intentionally biasing towards 🍄
|
|
204
|
--
choices = "🍄🍄🍎🍅" # 🌶️🍉🍒🍓
|
|
205
|
--
elif percent >= (100 - percent_margin):
|
|
206
|
--
choices = "🍋🍌" # 🍍🌽
|
|
207
|
--
else:
|
|
208
|
--
choices = "🫛🍈🍏🍐🥝🥒🥬🥦"
|
|
209
|
--
|
|
210
|
--
marker: str
|
|
211
|
--
if randomoji:
|
|
212
|
--
marker = random.choice(choices)
|
|
213
|
--
else:
|
|
214
|
--
marker = choices[0]
|
|
215
|
--
|
|
216
|
--
percent_str = f"{percent:.0f} %"
|
|
217
|
--
|
|
218
|
--
test_path_str = ".".join(result.test_path[1:])
|
|
219
|
--
|
|
220
|
--
test_link_text: str
|
|
221
|
--
if result.label == "":
|
|
222
|
--
test_link_text = f"`{test_path_str}`"
|
|
223
|
--
else:
|
|
224
|
--
test_link_text = f"`{test_path_str}` - {result.label}"
|
|
225
|
--
|
|
226
|
--
output.write(
|
|
227
|
--
f"| [{test_link_text}]({link_url})"
|
|
228
|
--
+ f" | {marker}"
|
|
229
|
--
+ f" | {mean_str}"
|
|
230
|
--
+ f" | {max_str}"
|
|
231
|
--
+ f" | {three_sigma_str}"
|
|
232
|
--
+ f" | {limit_str}"
|
|
233
|
--
+ f" | {percent_str}"
|
|
234
|
--
+ " |\n"
|
|
235
|
--
)
|
|
236
|
--
|
|
237
|
--
|
|
238
|
--
if __name__ == "__main__":
|
|
239
|
--
# pylint: disable = no-value-for-parameter
|
|
240
|
--
main()
|
|
@@@ -1,0 -1,0 +1,349 @@@
|
|
|
1
|
++
from __future__ import annotations
|
|
2
|
++
|
|
3
|
++
import dataclasses
|
|
4
|
++
import json
|
|
5
|
++
import random
|
|
6
|
++
from collections import defaultdict
|
|
7
|
++
from dataclasses import dataclass, field
|
|
8
|
++
from pathlib import Path
|
|
9
|
++
from statistics import StatisticsError, mean, stdev
|
|
10
|
++
from typing import Any, Dict, List, Optional, TextIO, Tuple, Type, final
|
|
11
|
++
|
|
12
|
++
import click
|
|
13
|
++
import lxml.etree
|
|
14
|
++
|
|
15
|
++
from tests.util.misc import BenchmarkData, DataTypeProtocol, TestId
|
|
16
|
++
from tests.util.time_out_assert import TimeOutAssertData
|
|
17
|
++
|
|
18
|
++
supported_data_types: List[Type[DataTypeProtocol]] = [TimeOutAssertData, BenchmarkData]
|
|
19
|
++
supported_data_types_by_tag: Dict[str, Type[DataTypeProtocol]] = {cls.tag: cls for cls in supported_data_types}
|
|
20
|
++
|
|
21
|
++
|
|
22
|
++
@final
|
|
23
|
++
@dataclass(frozen=True, order=True)
|
|
24
|
++
class Result:
|
|
25
|
++
file_path: Path
|
|
26
|
++
test_path: Tuple[str, ...]
|
|
27
|
++
ids: Tuple[str, ...]
|
|
28
|
++
label: str
|
|
29
|
++
line: int = field(compare=False)
|
|
30
|
++
durations: Tuple[float, ...] = field(compare=False)
|
|
31
|
++
limit: float = field(compare=False)
|
|
32
|
++
|
|
33
|
++
def marshal(self) -> Dict[str, Any]:
|
|
34
|
++
return {
|
|
35
|
++
"file_path": self.file_path.as_posix(),
|
|
36
|
++
"test_path": self.test_path,
|
|
37
|
++
"label": self.label,
|
|
38
|
++
"duration": {
|
|
39
|
++
"all": self.durations,
|
|
40
|
++
"min": min(self.durations),
|
|
41
|
++
"max": max(self.durations),
|
|
42
|
++
"mean": mean(self.durations),
|
|
43
|
++
},
|
|
44
|
++
}
|
|
45
|
++
|
|
46
|
++
def link(self, prefix: str, line_separator: str) -> str:
|
|
47
|
++
return f"{prefix}{self.file_path.as_posix()}{line_separator}{self.line}"
|
|
48
|
++
|
|
49
|
++
|
|
50
|
++
@final
|
|
51
|
++
@dataclasses.dataclass(frozen=True)
|
|
52
|
++
class EventId:
|
|
53
|
++
test_id: TestId
|
|
54
|
++
tag: str
|
|
55
|
++
line: int
|
|
56
|
++
path: Path
|
|
57
|
++
label: str
|
|
58
|
++
|
|
59
|
++
|
|
60
|
++
@click.command(context_settings={"help_option_names": ["-h", "--help"]})
|
|
61
|
++
@click.option(
|
|
62
|
++
"--xml",
|
|
63
|
++
"xml_file",
|
|
64
|
++
required=True,
|
|
65
|
++
type=click.File(),
|
|
66
|
++
help="The benchmarks JUnit XML results file",
|
|
67
|
++
)
|
|
68
|
++
@click.option(
|
|
69
|
++
"--link-prefix",
|
|
70
|
++
default="",
|
|
71
|
++
help="Prefix for output links such as for web links instead of IDE links",
|
|
72
|
++
show_default=True,
|
|
73
|
++
)
|
|
74
|
++
@click.option(
|
|
75
|
++
"--link-line-separator",
|
|
76
|
++
default=":",
|
|
77
|
++
help="The separator between the path and the line number, such as : for local links and #L on GitHub",
|
|
78
|
++
show_default=True,
|
|
79
|
++
)
|
|
80
|
++
@click.option(
|
|
81
|
++
"--output",
|
|
82
|
++
default="-",
|
|
83
|
++
type=click.File(mode="w", encoding="utf-8", lazy=True, atomic=True),
|
|
84
|
++
help="Output file, - for stdout",
|
|
85
|
++
show_default=True,
|
|
86
|
++
)
|
|
87
|
++
# TODO: anything but this pattern for output types
|
|
88
|
++
@click.option(
|
|
89
|
++
"--markdown/--no-markdown",
|
|
90
|
++
help="Use markdown as output format",
|
|
91
|
++
show_default=True,
|
|
92
|
++
)
|
|
93
|
++
@click.option(
|
|
94
|
++
"--percent-margin",
|
|
95
|
++
default=15,
|
|
96
|
++
type=int,
|
|
97
|
++
help="Highlight results with maximums within this percent of the limit",
|
|
98
|
++
show_default=True,
|
|
99
|
++
)
|
|
100
|
++
@click.option(
|
|
101
|
++
"--randomoji/--determimoji",
|
|
102
|
++
help="🍿",
|
|
103
|
++
show_default=True,
|
|
104
|
++
)
|
|
105
|
++
# TODO: subcommands?
|
|
106
|
++
@click.option(
|
|
107
|
++
"--type",
|
|
108
|
++
"tag",
|
|
109
|
++
type=click.Choice([cls.tag for cls in supported_data_types]),
|
|
110
|
++
help="The type of data to process",
|
|
111
|
++
required=True,
|
|
112
|
++
show_default=True,
|
|
113
|
++
)
|
|
114
|
++
@click.option(
|
|
115
|
++
"--limit",
|
|
116
|
++
"result_count_limit",
|
|
117
|
++
type=int,
|
|
118
|
++
help="Limit the number of results to output.",
|
|
119
|
++
)
|
|
120
|
++
def main(
|
|
121
|
++
xml_file: TextIO,
|
|
122
|
++
link_prefix: str,
|
|
123
|
++
link_line_separator: str,
|
|
124
|
++
output: TextIO,
|
|
125
|
++
markdown: bool,
|
|
126
|
++
percent_margin: int,
|
|
127
|
++
randomoji: bool,
|
|
128
|
++
tag: str,
|
|
129
|
++
result_count_limit: Optional[int],
|
|
130
|
++
) -> None:
|
|
131
|
++
data_type = supported_data_types_by_tag[tag]
|
|
132
|
++
|
|
133
|
++
tree = lxml.etree.parse(xml_file)
|
|
134
|
++
root = tree.getroot()
|
|
135
|
++
|
|
136
|
++
cases_by_test_id: defaultdict[TestId, List[lxml.etree.Element]] = defaultdict(list)
|
|
137
|
++
for suite in root.findall("testsuite"):
|
|
138
|
++
for case in suite.findall("testcase"):
|
|
139
|
++
if case.find("skipped") is not None:
|
|
140
|
++
continue
|
|
141
|
++
test_id_property = case.find("properties/property[@name='test_id']")
|
|
142
|
++
test_id = TestId.unmarshal(json.loads(test_id_property.attrib["value"]))
|
|
143
|
++
test_id = dataclasses.replace(
|
|
144
|
++
test_id, ids=tuple(id for id in test_id.ids if not id.startswith(f"{data_type.tag}_repeat"))
|
|
145
|
++
)
|
|
146
|
++
cases_by_test_id[test_id].append(case)
|
|
147
|
++
|
|
148
|
++
data_by_event_id: defaultdict[EventId, List[DataTypeProtocol]] = defaultdict(list)
|
|
149
|
++
for test_id, cases in cases_by_test_id.items():
|
|
150
|
++
for case in cases:
|
|
151
|
++
for property in case.findall(f"properties/property[@name='{tag}']"):
|
|
152
|
++
tag = property.attrib["name"]
|
|
153
|
++
data = supported_data_types_by_tag[tag].unmarshal(json.loads(property.attrib["value"]))
|
|
154
|
++
event_id = EventId(test_id=test_id, tag=tag, line=data.line, path=data.path, label=data.label)
|
|
155
|
++
data_by_event_id[event_id].append(data)
|
|
156
|
++
|
|
157
|
++
results: List[Result] = []
|
|
158
|
++
for event_id, datas in data_by_event_id.items():
|
|
159
|
++
[limit] = {data.limit for data in datas}
|
|
160
|
++
results.append(
|
|
161
|
++
Result(
|
|
162
|
++
file_path=event_id.path,
|
|
163
|
++
test_path=event_id.test_id.test_path,
|
|
164
|
++
ids=event_id.test_id.ids,
|
|
165
|
++
line=event_id.line,
|
|
166
|
++
durations=tuple(data.duration for data in datas),
|
|
167
|
++
limit=limit,
|
|
168
|
++
label=event_id.label,
|
|
169
|
++
)
|
|
170
|
++
)
|
|
171
|
++
|
|
172
|
++
if result_count_limit is not None:
|
|
173
|
++
results = sorted(results, key=lambda result: max(result.durations) / result.limit, reverse=True)
|
|
174
|
++
results = results[:result_count_limit]
|
|
175
|
++
|
|
176
|
++
handlers = {
|
|
177
|
++
BenchmarkData.tag: output_benchmark,
|
|
178
|
++
TimeOutAssertData.tag: output_time_out_assert,
|
|
179
|
++
}
|
|
180
|
++
handler = handlers[data_type.tag]
|
|
181
|
++
handler(
|
|
182
|
++
link_line_separator=link_line_separator,
|
|
183
|
++
link_prefix=link_prefix,
|
|
184
|
++
markdown=markdown,
|
|
185
|
++
output=output,
|
|
186
|
++
percent_margin=percent_margin,
|
|
187
|
++
randomoji=randomoji,
|
|
188
|
++
results=results,
|
|
189
|
++
)
|
|
190
|
++
|
|
191
|
++
|
|
192
|
++
def output_benchmark(
|
|
193
|
++
link_line_separator: str,
|
|
194
|
++
link_prefix: str,
|
|
195
|
++
markdown: bool,
|
|
196
|
++
output: TextIO,
|
|
197
|
++
percent_margin: int,
|
|
198
|
++
randomoji: bool,
|
|
199
|
++
results: List[Result],
|
|
200
|
++
) -> None:
|
|
201
|
++
if not markdown:
|
|
202
|
++
for result in sorted(results):
|
|
203
|
++
link = result.link(prefix=link_prefix, line_separator=link_line_separator)
|
|
204
|
++
dumped = json.dumps(result.marshal())
|
|
205
|
++
output.write(f"{link} {dumped}\n")
|
|
206
|
++
else:
|
|
207
|
++
output.write("# Benchmark Metrics\n\n")
|
|
208
|
++
|
|
209
|
++
output.write("| Test | 🍿 | Mean | Max | 3σ | Limit | Percent |\n")
|
|
210
|
++
output.write("| --- | --- | --- | --- | --- | --- | --- |\n")
|
|
211
|
++
for result in sorted(results):
|
|
212
|
++
link_url = result.link(prefix=link_prefix, line_separator=link_line_separator)
|
|
213
|
++
|
|
214
|
++
mean_str = "-"
|
|
215
|
++
three_sigma_str = "-"
|
|
216
|
++
if len(result.durations) > 1:
|
|
217
|
++
durations_mean = mean(result.durations)
|
|
218
|
++
mean_str = f"{durations_mean:.3f} s"
|
|
219
|
++
|
|
220
|
++
try:
|
|
221
|
++
three_sigma_str = f"{durations_mean + 3 * stdev(result.durations):.3f} s"
|
|
222
|
++
except StatisticsError:
|
|
223
|
++
pass
|
|
224
|
++
|
|
225
|
++
durations_max = max(result.durations)
|
|
226
|
++
max_str = f"{durations_max:.3f} s"
|
|
227
|
++
|
|
228
|
++
limit_str = f"{result.limit:.3f} s"
|
|
229
|
++
|
|
230
|
++
percent = 100 * durations_max / result.limit
|
|
231
|
++
if percent >= 100:
|
|
232
|
++
# intentionally biasing towards 🍄
|
|
233
|
++
choices = "🍄🍄🍎🍅" # 🌶️🍉🍒🍓
|
|
234
|
++
elif percent >= (100 - percent_margin):
|
|
235
|
++
choices = "🍋🍌" # 🍍🌽
|
|
236
|
++
else:
|
|
237
|
++
choices = "🫛🍈🍏🍐🥝🥒🥬🥦"
|
|
238
|
++
|
|
239
|
++
marker: str
|
|
240
|
++
if randomoji:
|
|
241
|
++
marker = random.choice(choices)
|
|
242
|
++
else:
|
|
243
|
++
marker = choices[0]
|
|
244
|
++
|
|
245
|
++
percent_str = f"{percent:.0f} %"
|
|
246
|
++
|
|
247
|
++
test_path_str = ".".join(result.test_path[1:])
|
|
248
|
++
if len(result.ids) > 0:
|
|
249
|
++
test_path_str += f"[{'-'.join(result.ids)}]"
|
|
250
|
++
|
|
251
|
++
test_link_text: str
|
|
252
|
++
if result.label == "":
|
|
253
|
++
test_link_text = f"`{test_path_str}`"
|
|
254
|
++
else:
|
|
255
|
++
test_link_text = f"`{test_path_str}` - {result.label}"
|
|
256
|
++
|
|
257
|
++
output.write(
|
|
258
|
++
f"| [{test_link_text}]({link_url})"
|
|
259
|
++
+ f" | {marker}"
|
|
260
|
++
+ f" | {mean_str}"
|
|
261
|
++
+ f" | {max_str}"
|
|
262
|
++
+ f" | {three_sigma_str}"
|
|
263
|
++
+ f" | {limit_str}"
|
|
264
|
++
+ f" | {percent_str}"
|
|
265
|
++
+ " |\n"
|
|
266
|
++
)
|
|
267
|
++
|
|
268
|
++
|
|
269
|
++
def output_time_out_assert(
|
|
270
|
++
link_line_separator: str,
|
|
271
|
++
link_prefix: str,
|
|
272
|
++
markdown: bool,
|
|
273
|
++
output: TextIO,
|
|
274
|
++
percent_margin: int,
|
|
275
|
++
randomoji: bool,
|
|
276
|
++
results: List[Result],
|
|
277
|
++
) -> None:
|
|
278
|
++
if not markdown:
|
|
279
|
++
for result in sorted(results):
|
|
280
|
++
link = result.link(prefix=link_prefix, line_separator=link_line_separator)
|
|
281
|
++
dumped = json.dumps(result.marshal())
|
|
282
|
++
output.write(f"{link} {dumped}\n")
|
|
283
|
++
else:
|
|
284
|
++
output.write("# Time Out Assert Metrics\n\n")
|
|
285
|
++
|
|
286
|
++
output.write("| Test | 🍿 | Mean | Max | 3σ | Limit | Percent |\n")
|
|
287
|
++
output.write("| --- | --- | --- | --- | --- | --- | --- |\n")
|
|
288
|
++
for result in sorted(results):
|
|
289
|
++
link_url = result.link(prefix=link_prefix, line_separator=link_line_separator)
|
|
290
|
++
|
|
291
|
++
mean_str = "-"
|
|
292
|
++
three_sigma_str = "-"
|
|
293
|
++
if len(result.durations) > 1:
|
|
294
|
++
durations_mean = mean(result.durations)
|
|
295
|
++
mean_str = f"{durations_mean:.3f} s"
|
|
296
|
++
|
|
297
|
++
try:
|
|
298
|
++
three_sigma_str = f"{durations_mean + 3 * stdev(result.durations):.3f} s"
|
|
299
|
++
except StatisticsError:
|
|
300
|
++
pass
|
|
301
|
++
|
|
302
|
++
durations_max = max(result.durations)
|
|
303
|
++
max_str = f"{durations_max:.3f} s"
|
|
304
|
++
|
|
305
|
++
limit_str = f"{result.limit:.3f} s"
|
|
306
|
++
|
|
307
|
++
percent = 100 * durations_max / result.limit
|
|
308
|
++
if percent >= 100:
|
|
309
|
++
# intentionally biasing towards 🍄
|
|
310
|
++
choices = "🍄🍄🍎🍅" # 🌶️🍉🍒🍓
|
|
311
|
++
elif percent >= (100 - percent_margin):
|
|
312
|
++
choices = "🍋🍌" # 🍍🌽
|
|
313
|
++
else:
|
|
314
|
++
choices = "🫛🍈🍏🍐🥝🥒🥬🥦"
|
|
315
|
++
|
|
316
|
++
marker: str
|
|
317
|
++
if randomoji:
|
|
318
|
++
marker = random.choice(choices)
|
|
319
|
++
else:
|
|
320
|
++
marker = choices[0]
|
|
321
|
++
|
|
322
|
++
percent_str = f"{percent:.0f} %"
|
|
323
|
++
|
|
324
|
++
test_path_str = ".".join(result.test_path[1:])
|
|
325
|
++
if len(result.ids) > 0:
|
|
326
|
++
test_path_str += f"[{'-'.join(result.ids)}]"
|
|
327
|
++
|
|
328
|
++
test_link_text: str
|
|
329
|
++
if result.label == "":
|
|
330
|
++
# TODO: but could be in different files too
|
|
331
|
++
test_link_text = f"`{test_path_str}` - {result.line}"
|
|
332
|
++
else:
|
|
333
|
++
test_link_text = f"`{test_path_str}` - {result.label}"
|
|
334
|
++
|
|
335
|
++
output.write(
|
|
336
|
++
f"| [{test_link_text}]({link_url})"
|
|
337
|
++
+ f" | {marker}"
|
|
338
|
++
+ f" | {mean_str}"
|
|
339
|
++
+ f" | {max_str}"
|
|
340
|
++
+ f" | {three_sigma_str}"
|
|
341
|
++
+ f" | {limit_str}"
|
|
342
|
++
+ f" | {percent_str}"
|
|
343
|
++
+ " |\n"
|
|
344
|
++
)
|
|
345
|
++
|
|
346
|
++
|
|
347
|
++
if __name__ == "__main__":
|
|
348
|
++
# pylint: disable = no-value-for-parameter
|
|
349
|
++
main()
|
|
1
|
--
|
|
350
|
++
+ b/tests/rpc/__init__.py
|
|
@@@ -1,0 -1,0 +1,49 @@@
|
|
|
1
|
++
from __future__ import annotations
|
|
2
|
++
|
|
3
|
++
from dataclasses import dataclass
|
|
4
|
++
from pathlib import Path
|
|
5
|
++
from typing import Any, Dict, Optional
|
|
6
|
++
|
|
7
|
++
import pytest
|
|
8
|
++
|
|
9
|
++
from chia.rpc.rpc_client import RpcClient
|
|
10
|
++
from chia.util.ints import uint16
|
|
11
|
++
from tests.util.misc import Marks, RecordingWebServer, datacases
|
|
12
|
++
|
|
13
|
++
|
|
14
|
++
@dataclass
|
|
15
|
++
class InvalidCreateCase:
|
|
16
|
++
id: str
|
|
17
|
++
root_path: Optional[Path] = None
|
|
18
|
++
net_config: Optional[Dict[str, Any]] = None
|
|
19
|
++
marks: Marks = ()
|
|
20
|
++
|
|
21
|
++
|
|
22
|
++
@datacases(
|
|
23
|
++
InvalidCreateCase(id="just root path", root_path=Path("/root/path")),
|
|
24
|
++
InvalidCreateCase(id="just net config", net_config={}),
|
|
25
|
++
)
|
|
26
|
++
@pytest.mark.anyio
|
|
27
|
++
async def test_rpc_client_create_raises_for_invalid_root_path_net_config_combinations(
|
|
28
|
++
case: InvalidCreateCase,
|
|
29
|
++
) -> None:
|
|
30
|
++
with pytest.raises(ValueError, match="Either both or neither of"):
|
|
31
|
++
await RpcClient.create(
|
|
32
|
++
self_hostname="",
|
|
33
|
++
port=uint16(0),
|
|
34
|
++
root_path=case.root_path,
|
|
35
|
++
net_config=case.net_config,
|
|
36
|
++
)
|
|
37
|
++
|
|
38
|
++
|
|
39
|
++
@pytest.mark.anyio
|
|
40
|
++
async def test_rpc_client_works_without_ssl(recording_web_server: RecordingWebServer) -> None:
|
|
41
|
++
expected_result = {"success": True, "daddy": "putdown"}
|
|
42
|
++
|
|
43
|
++
async with RpcClient.create_as_context(
|
|
44
|
++
self_hostname=recording_web_server.web_server.hostname,
|
|
45
|
++
port=recording_web_server.web_server.listen_port,
|
|
46
|
++
) as rpc_client:
|
|
47
|
++
result = await rpc_client.fetch(path="", request_json={"response": expected_result})
|
|
48
|
++
|
|
49
|
++
assert result == expected_result
|
|
@@@ -5,6 -5,6 +5,7 @@@ import dataclasse
|
|
|
5
5
|
import enum
|
|
6
6
|
import functools
|
|
7
7
|
import gc
|
|
8
|
++
import json
|
|
8
9
|
import logging
|
|
9
10
|
import os
|
|
10
11
|
import pathlib
|
|
@@@ -12,29 -11,23 +13,53 @@@ import ss
|
|
|
12
13
|
import subprocess
|
|
13
14
|
import sys
|
|
14
15
|
from concurrent.futures import Future
|
|
15
|
-
from
|
|
16
|
+
from dataclasses import dataclass, field
|
|
16
|
-
from
|
|
17
|
++
from pathlib import Path
|
|
17
18
|
from statistics import mean
|
|
18
19
|
from textwrap import dedent
|
|
19
20
|
from time import thread_time
|
|
20
21
|
from types import TracebackType
|
|
21
|
-
from typing import
|
|
22
|
-
|
|
23
|
-
|
|
22
|
++
from typing import (
|
|
23
|
++
TYPE_CHECKING,
|
|
24
|
++
Any,
|
|
25
|
++
Awaitable,
|
|
26
|
++
Callable,
|
|
27
|
++
ClassVar,
|
|
28
|
++
Collection,
|
|
29
|
++
Dict,
|
|
30
|
++
Iterator,
|
|
31
|
++
List,
|
|
32
|
++
Optional,
|
|
33
|
++
Protocol,
|
|
34
|
++
TextIO,
|
|
35
|
++
Tuple,
|
|
36
|
++
Type,
|
|
37
|
++
TypeVar,
|
|
38
|
++
Union,
|
|
39
|
++
cast,
|
|
40
|
++
final,
|
|
41
|
++
)
|
|
42
|
+
|
|
43
|
+
import aiohttp
|
|
24
44
|
import pytest
|
|
45
|
++
|
|
46
|
++
# TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469
|
|
47
|
++
from _pytest.nodes import Node
|
|
48
|
+
from aiohttp import web
|
|
25
49
|
from chia_rs import Coin
|
|
26
|
--
from typing_extensions import Protocol, final
|
|
27
50
|
|
|
28
51
|
import chia
|
|
52
|
++
import tests
|
|
53
|
+
from chia.full_node.mempool import Mempool
|
|
29
54
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
30
55
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
31
56
|
from chia.util.hash import std_hash
|
|
32
|
-
from chia.util.ints import uint64
|
|
57
|
+
from chia.util.ints import uint16, uint32, uint64
|
|
58
|
++
from chia.util.misc import caller_file_and_line
|
|
59
|
+
from chia.util.network import WebServer
|
|
33
60
|
from chia.wallet.util.compute_hints import HintedCoin
|
|
61
|
+
from chia.wallet.wallet_node import WalletNode
|
|
62
|
++
from tests import ether
|
|
34
63
|
from tests.core.data_layer.util import ChiaRoot
|
|
35
64
|
|
|
36
65
|
|
|
@@@ -70,11 -63,11 +95,6 @@@ def manage_gc(mode: GcMode) -> Iterator
|
|
|
70
95
|
gc.disable()
|
|
71
96
|
|
|
72
97
|
|
|
73
|
--
def caller_file_and_line(distance: int = 1) -> Tuple[str, int]:
|
|
74
|
--
caller = getframeinfo(stack()[distance + 1][0])
|
|
75
|
--
return caller.filename, caller.lineno
|
|
76
|
--
|
|
77
|
--
|
|
78
98
|
@dataclasses.dataclass(frozen=True)
|
|
79
99
|
class RuntimeResults:
|
|
80
100
|
start: float
|
|
@@@ -180,7 -173,7 +200,12 @@@ def measure_runtime
|
|
|
180
200
|
overhead: Optional[float] = None,
|
|
181
201
|
print_results: bool = True,
|
|
182
202
|
) -> Iterator[Future[RuntimeResults]]:
|
|
183
|
--
entry_file, entry_line = caller_file_and_line(
|
|
203
|
++
entry_file, entry_line = caller_file_and_line(
|
|
204
|
++
relative_to=(
|
|
205
|
++
pathlib.Path(chia.__file__).parent.parent,
|
|
206
|
++
pathlib.Path(tests.__file__).parent.parent,
|
|
207
|
++
)
|
|
208
|
++
)
|
|
184
209
|
|
|
185
210
|
results_future: Future[RuntimeResults] = Future()
|
|
186
211
|
|
|
@@@ -210,6 -203,6 +235,43 @@@
|
|
|
210
235
|
print(results.block(label=label))
|
|
211
236
|
|
|
212
237
|
|
|
238
|
++
@final
|
|
239
|
++
@dataclasses.dataclass(frozen=True)
|
|
240
|
++
class BenchmarkData:
|
|
241
|
++
if TYPE_CHECKING:
|
|
242
|
++
_protocol_check: ClassVar[DataTypeProtocol] = cast("BenchmarkData", None)
|
|
243
|
++
|
|
244
|
++
tag: ClassVar[str] = "benchmark"
|
|
245
|
++
|
|
246
|
++
duration: float
|
|
247
|
++
path: pathlib.Path
|
|
248
|
++
line: int
|
|
249
|
++
limit: float
|
|
250
|
++
|
|
251
|
++
label: str
|
|
252
|
++
|
|
253
|
++
__match_args__: ClassVar[Tuple[str, ...]] = ()
|
|
254
|
++
|
|
255
|
++
@classmethod
|
|
256
|
++
def unmarshal(cls, marshalled: Dict[str, Any]) -> BenchmarkData:
|
|
257
|
++
return cls(
|
|
258
|
++
duration=marshalled["duration"],
|
|
259
|
++
path=pathlib.Path(marshalled["path"]),
|
|
260
|
++
line=int(marshalled["line"]),
|
|
261
|
++
limit=marshalled["limit"],
|
|
262
|
++
label=marshalled["label"],
|
|
263
|
++
)
|
|
264
|
++
|
|
265
|
++
def marshal(self) -> Dict[str, Any]:
|
|
266
|
++
return {
|
|
267
|
++
"duration": self.duration,
|
|
268
|
++
"path": self.path.as_posix(),
|
|
269
|
++
"line": self.line,
|
|
270
|
++
"limit": self.limit,
|
|
271
|
++
"label": self.label,
|
|
272
|
++
}
|
|
273
|
++
|
|
274
|
++
|
|
213
275
|
@final
|
|
214
276
|
@dataclasses.dataclass
|
|
215
277
|
class _AssertRuntime:
|
|
@@@ -236,6 -229,6 +298,7 @@@
|
|
|
236
298
|
# https://github.com/pytest-dev/pytest/issues/2057
|
|
237
299
|
|
|
238
300
|
seconds: float
|
|
301
|
++
# TODO: Optional?
|
|
239
302
|
label: str = ""
|
|
240
303
|
clock: Callable[[], float] = thread_time
|
|
241
304
|
gc_mode: GcMode = GcMode.disable
|
|
@@@ -247,10 -240,10 +310,14 @@@
|
|
|
247
310
|
runtime_manager: Optional[contextlib.AbstractContextManager[Future[RuntimeResults]]] = None
|
|
248
311
|
runtime_results_callable: Optional[Future[RuntimeResults]] = None
|
|
249
312
|
enable_assertion: bool = True
|
|
250
|
--
record_property: Optional[Callable[[str, object], None]] = None
|
|
251
313
|
|
|
252
314
|
def __enter__(self) -> Future[AssertRuntimeResults]:
|
|
253
|
--
self.entry_file, self.entry_line = caller_file_and_line(
|
|
315
|
++
self.entry_file, self.entry_line = caller_file_and_line(
|
|
316
|
++
relative_to=(
|
|
317
|
++
pathlib.Path(chia.__file__).parent.parent,
|
|
318
|
++
pathlib.Path(tests.__file__).parent.parent,
|
|
319
|
++
)
|
|
320
|
++
)
|
|
254
321
|
|
|
255
322
|
self.runtime_manager = measure_runtime(
|
|
256
323
|
clock=self.clock, gc_mode=self.gc_mode, overhead=self.overhead, print_results=False
|
|
@@@ -290,16 -283,16 +357,19 @@@
|
|
|
290
357
|
if self.print:
|
|
291
358
|
print(results.block(label=self.label))
|
|
292
359
|
|
|
293
|
--
if
|
|
294
|
--
|
|
295
|
--
|
|
296
|
--
|
|
297
|
--
|
|
360
|
++
if ether.record_property is not None:
|
|
361
|
++
data = BenchmarkData(
|
|
362
|
++
duration=results.duration,
|
|
363
|
++
path=pathlib.Path(self.entry_file),
|
|
364
|
++
line=self.entry_line,
|
|
365
|
++
limit=self.seconds,
|
|
366
|
++
label=self.label,
|
|
298
367
|
)
|
|
299
368
|
|
|
300
|
--
|
|
301
|
--
|
|
302
|
--
|
|
369
|
++
ether.record_property( # pylint: disable=E1102
|
|
370
|
++
data.tag,
|
|
371
|
++
json.dumps(data.marshal(), ensure_ascii=True, sort_keys=True),
|
|
372
|
++
)
|
|
303
373
|
|
|
304
374
|
if exc_type is None and self.enable_assertion:
|
|
305
375
|
__tracebackhide__ = True
|
|
@@@ -310,15 -303,15 +380,13 @@@
|
|
|
310
380
|
@dataclasses.dataclass
|
|
311
381
|
class BenchmarkRunner:
|
|
312
382
|
enable_assertion: bool = True
|
|
313
|
--
|
|
383
|
++
test_id: Optional[TestId] = None
|
|
314
384
|
overhead: Optional[float] = None
|
|
315
|
--
record_property: Optional[Callable[[str, object], None]] = None
|
|
316
385
|
|
|
317
386
|
@functools.wraps(_AssertRuntime)
|
|
318
387
|
def assert_runtime(self, *args: Any, **kwargs: Any) -> _AssertRuntime:
|
|
319
388
|
kwargs.setdefault("enable_assertion", self.enable_assertion)
|
|
320
389
|
kwargs.setdefault("overhead", self.overhead)
|
|
321
|
--
kwargs.setdefault("record_property", self.record_property)
|
|
322
390
|
return _AssertRuntime(*args, **kwargs)
|
|
323
391
|
|
|
324
392
|
|
|
@@@ -414,73 -407,3 +482,153 @@@ def create_logger(file: TextIO = sys.st
|
|
|
414
482
|
logger.addHandler(hdlr=stream_handler)
|
|
415
483
|
|
|
416
484
|
return logger
|
|
485
|
+
|
|
486
|
+
|
|
487
|
+
def invariant_check_mempool(mempool: Mempool) -> None:
|
|
488
|
+
with mempool._db_conn:
|
|
489
|
+
cursor = mempool._db_conn.execute("SELECT SUM(cost) FROM tx")
|
|
490
|
+
val = cursor.fetchone()[0]
|
|
491
|
+
if val is None:
|
|
492
|
+
val = 0
|
|
493
|
+
assert mempool._total_cost == val
|
|
494
|
+
|
|
495
|
+
with mempool._db_conn:
|
|
496
|
+
cursor = mempool._db_conn.execute("SELECT SUM(fee) FROM tx")
|
|
497
|
+
val = cursor.fetchone()[0]
|
|
498
|
+
if val is None:
|
|
499
|
+
val = 0
|
|
500
|
+
assert mempool._total_fee == val
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
async def wallet_height_at_least(wallet_node: WalletNode, h: uint32) -> bool:
|
|
504
|
+
height = await wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to()
|
|
505
|
+
return height == h
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
@final
|
|
509
|
+
@dataclass
|
|
510
|
+
class RecordingWebServer:
|
|
511
|
+
web_server: WebServer
|
|
512
|
+
requests: List[web.Request] = field(default_factory=list)
|
|
513
|
+
|
|
514
|
+
@classmethod
|
|
515
|
+
async def create(
|
|
516
|
+
cls,
|
|
517
|
+
hostname: str,
|
|
518
|
+
port: uint16,
|
|
519
|
+
max_request_body_size: int = 1024**2, # Default `client_max_size` from web.Application
|
|
520
|
+
ssl_context: Optional[ssl.SSLContext] = None,
|
|
521
|
+
prefer_ipv6: bool = False,
|
|
522
|
+
) -> RecordingWebServer:
|
|
523
|
+
web_server = await WebServer.create(
|
|
524
|
+
hostname=hostname,
|
|
525
|
+
port=port,
|
|
526
|
+
max_request_body_size=max_request_body_size,
|
|
527
|
+
ssl_context=ssl_context,
|
|
528
|
+
prefer_ipv6=prefer_ipv6,
|
|
529
|
+
start=False,
|
|
530
|
+
)
|
|
531
|
+
|
|
532
|
+
self = cls(web_server=web_server)
|
|
533
|
+
routes = [web.route(method="*", path=route, handler=func) for (route, func) in self.get_routes().items()]
|
|
534
|
+
web_server.add_routes(routes=routes)
|
|
535
|
+
await web_server.start()
|
|
536
|
+
return self
|
|
537
|
+
|
|
538
|
+
def get_routes(self) -> Dict[str, Callable[[web.Request], Awaitable[web.Response]]]:
|
|
539
|
+
return {"/{path:.*}": self.handler}
|
|
540
|
+
|
|
541
|
+
async def handler(self, request: web.Request) -> web.Response:
|
|
542
|
+
self.requests.append(request)
|
|
543
|
+
|
|
544
|
+
request_json = await request.json()
|
|
545
|
+
if isinstance(request_json, dict) and "response" in request_json:
|
|
546
|
+
response = request_json["response"]
|
|
547
|
+
else:
|
|
548
|
+
response = {"success": True}
|
|
549
|
+
|
|
550
|
+
return aiohttp.web.json_response(data=response)
|
|
551
|
+
|
|
552
|
+
async def await_closed(self) -> None:
|
|
553
|
+
self.web_server.close()
|
|
554
|
+
await self.web_server.await_closed()
|
|
555
|
++
|
|
556
|
++
|
|
557
|
++
@final
|
|
558
|
++
@dataclasses.dataclass(frozen=True)
|
|
559
|
++
class TestId:
|
|
560
|
++
platform: str
|
|
561
|
++
test_path: Tuple[str, ...]
|
|
562
|
++
ids: Tuple[str, ...]
|
|
563
|
++
|
|
564
|
++
@classmethod
|
|
565
|
++
def create(cls, node: Node, platform: str = sys.platform) -> TestId:
|
|
566
|
++
test_path: List[str] = []
|
|
567
|
++
temp_node = node
|
|
568
|
++
while True:
|
|
569
|
++
name: str
|
|
570
|
++
if isinstance(temp_node, pytest.Function):
|
|
571
|
++
name = temp_node.originalname
|
|
572
|
++
elif isinstance(temp_node, pytest.Package):
|
|
573
|
++
# must check before pytest.Module since Package is a subclass
|
|
574
|
++
name = temp_node.name
|
|
575
|
++
elif isinstance(temp_node, pytest.Module):
|
|
576
|
++
name = temp_node.name[:-3]
|
|
577
|
++
else:
|
|
578
|
++
name = temp_node.name
|
|
579
|
++
test_path.insert(0, name)
|
|
580
|
++
if isinstance(temp_node.parent, pytest.Session) or temp_node.parent is None:
|
|
581
|
++
break
|
|
582
|
++
temp_node = temp_node.parent
|
|
583
|
++
|
|
584
|
++
# TODO: can we avoid parsing the id's etc from the node name?
|
|
585
|
++
test_name, delimiter, rest = node.name.partition("[")
|
|
586
|
++
ids: Tuple[str, ...]
|
|
587
|
++
if delimiter == "":
|
|
588
|
++
ids = ()
|
|
589
|
++
else:
|
|
590
|
++
ids = tuple(rest.rstrip("]").split("-"))
|
|
591
|
++
|
|
592
|
++
return cls(
|
|
593
|
++
platform=platform,
|
|
594
|
++
test_path=tuple(test_path),
|
|
595
|
++
ids=ids,
|
|
596
|
++
)
|
|
597
|
++
|
|
598
|
++
@classmethod
|
|
599
|
++
def unmarshal(cls, marshalled: Dict[str, Any]) -> TestId:
|
|
600
|
++
return cls(
|
|
601
|
++
platform=marshalled["platform"],
|
|
602
|
++
test_path=tuple(marshalled["test_path"]),
|
|
603
|
++
ids=tuple(marshalled["ids"]),
|
|
604
|
++
)
|
|
605
|
++
|
|
606
|
++
def marshal(self) -> Dict[str, Any]:
|
|
607
|
++
return {
|
|
608
|
++
"platform": self.platform,
|
|
609
|
++
"test_path": self.test_path,
|
|
610
|
++
"ids": self.ids,
|
|
611
|
++
}
|
|
612
|
++
|
|
613
|
++
|
|
614
|
++
T = TypeVar("T")
|
|
615
|
++
|
|
616
|
++
|
|
617
|
++
@dataclasses.dataclass(frozen=True)
|
|
618
|
++
class DataTypeProtocol(Protocol):
|
|
619
|
++
tag: ClassVar[str]
|
|
620
|
++
|
|
621
|
++
line: int
|
|
622
|
++
path: Path
|
|
623
|
++
label: str
|
|
624
|
++
duration: float
|
|
625
|
++
limit: float
|
|
626
|
++
|
|
627
|
++
__match_args__: ClassVar[Tuple[str, ...]] = ()
|
|
628
|
++
|
|
629
|
++
@classmethod
|
|
630
|
++
def unmarshal(cls: Type[T], marshalled: Dict[str, Any]) -> T:
|
|
631
|
++
...
|
|
632
|
++
|
|
633
|
++
def marshal(self) -> Dict[str, Any]:
|
|
634
|
++
...
|
|
@@@ -15,6 -15,6 +15,7 @@@ from chia.types.spend_bundle_condition
|
|
|
15
15
|
from chia.util.condition_tools import parse_sexp_to_conditions, pkm_pairs, pkm_pairs_for_conditions_dict
|
|
16
16
|
from chia.util.errors import ConsensusError
|
|
17
17
|
from chia.util.hash import std_hash
|
|
18
|
++
from chia.util.ints import uint64
|
|
18
19
|
|
|
19
20
|
H1 = bytes32(b"a" * 32)
|
|
20
21
|
H2 = bytes32(b"b" * 32)
|
|
@@@ -23,7 -23,7 +24,7 @@@ H3 = bytes32(b"c" * 32
|
|
|
23
24
|
PK1 = G1Element.generator()
|
|
24
25
|
PK2 = G1Element.generator()
|
|
25
26
|
|
|
26
|
--
TEST_COIN = Coin(H1, H2, 123)
|
|
27
|
++
TEST_COIN = Coin(H1, H2, uint64(123))
|
|
27
28
|
|
|
28
29
|
|
|
29
30
|
def mk_agg_sig_conditions(
|
|
@@@ -1,19 -1,16 +1,22 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
import contextlib
|
|
4
|
-
from typing import AsyncIterator, Iterator, List, Optional, TypeVar
|
|
5
|
-
from typing import AsyncIterator, Iterator, List
|
|
4
|
++
from typing import AsyncIterator, Iterator, List, Optional, Tuple, Type, TypeVar
|
|
6
5
|
|
|
6
|
+
import aiohttp
|
|
7
|
+
import anyio
|
|
7
8
|
import pytest
|
|
8
9
|
|
|
10
|
++
from chia.types.blockchain_format.program import Program
|
|
9
11
|
from chia.util.errors import InvalidPathError
|
|
12
|
++
from chia.util.ints import uint64
|
|
10
13
|
from chia.util.misc import (
|
|
11
14
|
SplitAsyncManager,
|
|
12
15
|
SplitManager,
|
|
16
|
+
ValuedEvent,
|
|
13
17
|
format_bytes,
|
|
14
18
|
format_minutes,
|
|
19
|
++
satisfies_hint,
|
|
15
20
|
split_async_manager,
|
|
16
21
|
split_manager,
|
|
17
22
|
to_batches,
|
|
@@@ -313,118 -306,3 +316,139 @@@ async def test_split_async_manager_rais
|
|
|
313
316
|
|
|
314
317
|
with pytest.raises(Exception, match="^not yet entered$"):
|
|
315
318
|
await split.exit()
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
async def wait_for_valued_event_waiters(
|
|
322
|
+
event: ValuedEvent[T],
|
|
323
|
+
count: int,
|
|
324
|
+
timeout: float = 10,
|
|
325
|
+
) -> None:
|
|
326
|
+
with anyio.fail_after(delay=adjusted_timeout(timeout)):
|
|
327
|
+
for delay in backoff_times():
|
|
328
|
+
# ignoring the type since i'm hacking into the private attribute
|
|
329
|
+
# hopefully this is ok for testing and if it becomes invalid we
|
|
330
|
+
# will end up with an exception and can adjust then
|
|
331
|
+
if len(event._event._waiters) >= count: # type: ignore[attr-defined]
|
|
332
|
+
return
|
|
333
|
+
await anyio.sleep(delay)
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
@pytest.mark.anyio
|
|
337
|
+
async def test_valued_event_wait_already_set() -> None:
|
|
338
|
+
valued_event = ValuedEvent[int]()
|
|
339
|
+
value = 37
|
|
340
|
+
valued_event.set(value)
|
|
341
|
+
|
|
342
|
+
with anyio.fail_after(adjusted_timeout(10)):
|
|
343
|
+
result = await valued_event.wait()
|
|
344
|
+
|
|
345
|
+
assert result == value
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
@pytest.mark.anyio
|
|
349
|
+
async def test_valued_event_wait_not_yet_set() -> None:
|
|
350
|
+
valued_event = ValuedEvent[int]()
|
|
351
|
+
value = 37
|
|
352
|
+
result: Optional[int] = None
|
|
353
|
+
|
|
354
|
+
async def wait(valued_event: ValuedEvent[int]) -> None:
|
|
355
|
+
nonlocal result
|
|
356
|
+
result = await valued_event.wait()
|
|
357
|
+
|
|
358
|
+
with anyio.fail_after(adjusted_timeout(10)):
|
|
359
|
+
async with anyio.create_task_group() as task_group:
|
|
360
|
+
task_group.start_soon(wait, valued_event)
|
|
361
|
+
await wait_for_valued_event_waiters(event=valued_event, count=1)
|
|
362
|
+
valued_event.set(value)
|
|
363
|
+
|
|
364
|
+
assert result == value
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
@pytest.mark.anyio
|
|
368
|
+
async def test_valued_event_wait_blocks_when_not_set() -> None:
|
|
369
|
+
valued_event = ValuedEvent[int]()
|
|
370
|
+
with pytest.raises(TimeoutError):
|
|
371
|
+
# if we could just process until there are no pending events, that would be great
|
|
372
|
+
with anyio.fail_after(adjusted_timeout(1)):
|
|
373
|
+
await valued_event.wait()
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
@pytest.mark.anyio
|
|
377
|
+
async def test_valued_event_multiple_waits_all_get_values() -> None:
|
|
378
|
+
results: List[int] = []
|
|
379
|
+
valued_event = ValuedEvent[int]()
|
|
380
|
+
value = 37
|
|
381
|
+
task_count = 10
|
|
382
|
+
|
|
383
|
+
async def wait_and_append() -> None:
|
|
384
|
+
results.append(await valued_event.wait())
|
|
385
|
+
|
|
386
|
+
async with anyio.create_task_group() as task_group:
|
|
387
|
+
for i in range(task_count):
|
|
388
|
+
task_group.start_soon(wait_and_append, name=f"wait_and_append_{i}")
|
|
389
|
+
|
|
390
|
+
await wait_for_valued_event_waiters(event=valued_event, count=task_count)
|
|
391
|
+
valued_event.set(value)
|
|
392
|
+
|
|
393
|
+
assert results == [value] * task_count
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
@pytest.mark.anyio
|
|
397
|
+
async def test_valued_event_set_again_raises_and_does_not_change_value() -> None:
|
|
398
|
+
valued_event = ValuedEvent[int]()
|
|
399
|
+
value = 37
|
|
400
|
+
valued_event.set(value)
|
|
401
|
+
|
|
402
|
+
with pytest.raises(Exception, match="^Value already set$"):
|
|
403
|
+
valued_event.set(value + 1)
|
|
404
|
+
|
|
405
|
+
with anyio.fail_after(adjusted_timeout(10)):
|
|
406
|
+
result = await valued_event.wait()
|
|
407
|
+
|
|
408
|
+
assert result == value
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
@pytest.mark.anyio
|
|
412
|
+
async def test_valued_event_wait_raises_if_not_set() -> None:
|
|
413
|
+
valued_event = ValuedEvent[int]()
|
|
414
|
+
valued_event._event.set()
|
|
415
|
+
|
|
416
|
+
with pytest.raises(Exception, match="^Value not set despite event being set$"):
|
|
417
|
+
with anyio.fail_after(adjusted_timeout(10)):
|
|
418
|
+
await valued_event.wait()
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
@pytest.mark.anyio
|
|
422
|
+
async def test_recording_web_server_specified_response(
|
|
423
|
+
recording_web_server: RecordingWebServer,
|
|
424
|
+
) -> None:
|
|
425
|
+
expected_response = {"success": True, "magic": "asparagus"}
|
|
426
|
+
|
|
427
|
+
async with aiohttp.ClientSession() as session:
|
|
428
|
+
async with session.post(
|
|
429
|
+
url=recording_web_server.web_server.url(),
|
|
430
|
+
json={"response": expected_response},
|
|
431
|
+
) as response:
|
|
432
|
+
response.raise_for_status()
|
|
433
|
+
assert await response.json() == expected_response
|
|
434
|
++
|
|
435
|
++
|
|
436
|
++
@pytest.mark.parametrize(
|
|
437
|
++
"obj, type_hint, expected_result",
|
|
438
|
++
[
|
|
439
|
++
(42, int, True),
|
|
440
|
++
(42, uint64, False),
|
|
441
|
++
(uint64(42), uint64, True),
|
|
442
|
++
("42", int, False),
|
|
443
|
++
([4, 2], List[int], True),
|
|
444
|
++
([4, "2"], List[int], False),
|
|
445
|
++
((4, 2), Tuple[int, int], True),
|
|
446
|
++
((4, "2"), Tuple[int, int], False),
|
|
447
|
++
((4, 2), Tuple[int, ...], True),
|
|
448
|
++
((4, "2"), Tuple[int, ...], False),
|
|
449
|
++
([(4, Program.to([2]))], List[Tuple[int, Program]], True),
|
|
450
|
++
([(4, "2")], Tuple[int, str], False),
|
|
451
|
++
],
|
|
452
|
++
)
|
|
453
|
++
def test_satisfies_hint(obj: T, type_hint: Type[T], expected_result: bool) -> None:
|
|
454
|
++
assert satisfies_hint(obj, type_hint) == expected_result
|
|
@@@ -1,39 -1,39 +1,130 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
import asyncio
|
|
4
|
++
import dataclasses
|
|
5
|
++
import json
|
|
4
6
|
import logging
|
|
7
|
++
import pathlib
|
|
5
8
|
import time
|
|
6
|
--
from typing import Callable
|
|
9
|
++
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Tuple, cast, final
|
|
7
10
|
|
|
11
|
++
import chia
|
|
12
|
++
import tests
|
|
8
13
|
from chia.protocols.protocol_message_types import ProtocolMessageTypes
|
|
14
|
++
from chia.util.misc import caller_file_and_line
|
|
9
15
|
from chia.util.timing import adjusted_timeout
|
|
16
|
++
from tests import ether
|
|
17
|
++
from tests.util.misc import DataTypeProtocol
|
|
10
18
|
|
|
11
19
|
log = logging.getLogger(__name__)
|
|
12
20
|
|
|
13
21
|
|
|
14
|
--
|
|
22
|
++
@final
|
|
23
|
++
@dataclasses.dataclass(frozen=True)
|
|
24
|
++
class TimeOutAssertData:
|
|
25
|
++
if TYPE_CHECKING:
|
|
26
|
++
_protocol_check: ClassVar[DataTypeProtocol] = cast("TimeOutAssertData", None)
|
|
27
|
++
|
|
28
|
++
tag: ClassVar[str] = "time_out_assert"
|
|
29
|
++
|
|
30
|
++
duration: float
|
|
31
|
++
path: pathlib.Path
|
|
32
|
++
line: int
|
|
33
|
++
limit: float
|
|
34
|
++
timed_out: bool
|
|
35
|
++
|
|
36
|
++
label: str = ""
|
|
37
|
++
|
|
38
|
++
__match_args__: ClassVar[Tuple[str, ...]] = ()
|
|
39
|
++
|
|
40
|
++
@classmethod
|
|
41
|
++
def unmarshal(cls, marshalled: Dict[str, Any]) -> TimeOutAssertData:
|
|
42
|
++
return cls(
|
|
43
|
++
duration=marshalled["duration"],
|
|
44
|
++
path=pathlib.Path(marshalled["path"]),
|
|
45
|
++
line=int(marshalled["line"]),
|
|
46
|
++
limit=marshalled["limit"],
|
|
47
|
++
timed_out=marshalled["timed_out"],
|
|
48
|
++
)
|
|
49
|
++
|
|
50
|
++
def marshal(self) -> Dict[str, Any]:
|
|
51
|
++
return {
|
|
52
|
++
"duration": self.duration,
|
|
53
|
++
"path": self.path.as_posix(),
|
|
54
|
++
"line": self.line,
|
|
55
|
++
"limit": self.limit,
|
|
56
|
++
"timed_out": self.timed_out,
|
|
57
|
++
}
|
|
58
|
++
|
|
59
|
++
|
|
60
|
++
async def time_out_assert_custom_interval(
|
|
61
|
++
timeout: float, interval, function, value=True, *args, stack_distance=0, **kwargs
|
|
62
|
++
):
|
|
15
63
|
__tracebackhide__ = True
|
|
16
64
|
|
|
65
|
++
entry_file, entry_line = caller_file_and_line(
|
|
66
|
++
distance=stack_distance + 1,
|
|
67
|
++
relative_to=(
|
|
68
|
++
pathlib.Path(chia.__file__).parent.parent,
|
|
69
|
++
pathlib.Path(tests.__file__).parent.parent,
|
|
70
|
++
),
|
|
71
|
++
)
|
|
72
|
++
|
|
17
73
|
timeout = adjusted_timeout(timeout=timeout)
|
|
18
74
|
|
|
19
|
--
start = time.
|
|
20
|
--
|
|
21
|
--
|
|
22
|
--
|
|
23
|
--
|
|
24
|
--
|
|
25
|
--
|
|
26
|
--
|
|
27
|
--
|
|
28
|
--
|
|
75
|
++
start = time.monotonic()
|
|
76
|
++
duration = 0.0
|
|
77
|
++
timed_out = False
|
|
78
|
++
try:
|
|
79
|
++
while True:
|
|
80
|
++
if asyncio.iscoroutinefunction(function):
|
|
81
|
++
f_res = await function(*args, **kwargs)
|
|
82
|
++
else:
|
|
83
|
++
f_res = function(*args, **kwargs)
|
|
84
|
++
|
|
85
|
++
if value == f_res:
|
|
86
|
++
return None
|
|
87
|
++
|
|
88
|
++
now = time.monotonic()
|
|
89
|
++
duration = now - start
|
|
90
|
++
|
|
91
|
++
if duration > timeout:
|
|
92
|
++
timed_out = True
|
|
93
|
++
assert False, f"Timed assertion timed out after {timeout} seconds: expected {value!r}, got {f_res!r}"
|
|
94
|
++
|
|
95
|
++
await asyncio.sleep(min(interval, timeout - duration))
|
|
96
|
++
finally:
|
|
97
|
++
if ether.record_property is not None:
|
|
98
|
++
data = TimeOutAssertData(
|
|
99
|
++
duration=duration,
|
|
100
|
++
path=pathlib.Path(entry_file),
|
|
101
|
++
line=entry_line,
|
|
102
|
++
limit=timeout,
|
|
103
|
++
timed_out=timed_out,
|
|
104
|
++
)
|
|
105
|
++
|
|
106
|
++
ether.record_property( # pylint: disable=E1102
|
|
107
|
++
data.tag,
|
|
108
|
++
json.dumps(data.marshal(), ensure_ascii=True, sort_keys=True),
|
|
109
|
++
)
|
|
29
110
|
|
|
30
111
|
|
|
31
112
|
async def time_out_assert(timeout: int, function, value=True, *args, **kwargs):
|
|
32
113
|
__tracebackhide__ = True
|
|
33
|
--
await time_out_assert_custom_interval(
|
|
114
|
++
await time_out_assert_custom_interval(
|
|
115
|
++
timeout,
|
|
116
|
++
0.05,
|
|
117
|
++
function,
|
|
118
|
++
value,
|
|
119
|
++
*args,
|
|
120
|
++
**kwargs,
|
|
121
|
++
stack_distance=1,
|
|
122
|
++
)
|
|
34
123
|
|
|
35
124
|
|
|
36
125
|
async def time_out_assert_not_none(timeout: float, function, *args, **kwargs):
|
|
126
|
++
# TODO: rework to leverage time_out_assert_custom_interval() such as by allowing
|
|
127
|
++
# value to be a callable
|
|
37
128
|
__tracebackhide__ = True
|
|
38
129
|
|
|
39
130
|
timeout = adjusted_timeout(timeout=timeout)
|
|
@@@ -38,952 -39,1016 +38,910 @@@ from tests.util.setup_nodes import OldS
|
|
|
38
38
|
from tests.util.time_out_assert import time_out_assert, time_out_assert_not_none
|
|
39
39
|
|
|
40
40
|
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
"trusted",
|
|
44
|
-
[True, False],
|
|
45
|
-
)
|
|
46
|
-
@pytest.mark.anyio
|
|
47
|
-
async def test_cat_creation(self, self_hostname, two_wallet_nodes, trusted):
|
|
48
|
-
num_blocks = 3
|
|
49
|
-
full_nodes, wallets, _ = two_wallet_nodes
|
|
50
|
-
full_node_api = full_nodes[0]
|
|
51
|
-
full_node_server = full_node_api.server
|
|
52
|
-
wallet_node, server_2 = wallets[0]
|
|
53
|
-
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
54
|
-
|
|
55
|
-
ph = await wallet.get_new_puzzlehash()
|
|
56
|
-
if trusted:
|
|
57
|
-
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
58
|
-
else:
|
|
59
|
-
wallet_node.config["trusted_peers"] = {}
|
|
60
|
-
|
|
61
|
-
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
62
|
-
for i in range(0, num_blocks):
|
|
63
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
64
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
|
|
65
|
-
|
|
66
|
-
funds = sum(
|
|
67
|
-
[
|
|
68
|
-
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
|
|
69
|
-
for i in range(1, num_blocks + 1)
|
|
70
|
-
]
|
|
71
|
-
)
|
|
41
|
+
def check_wallets(node: WalletNode) -> int:
|
|
42
|
+
return len(node.wallet_state_manager.wallets.keys())
|
|
72
43
|
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
cat_wallet = await CATWallet.create(wallet_node.wallet_state_manager, wallet, cat_wallet.wallet_info)
|
|
87
|
-
await wallet_node.wallet_state_manager.add_new_wallet(cat_wallet)
|
|
88
|
-
|
|
89
|
-
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
|
|
90
|
-
tx_record = tx_queue[0]
|
|
91
|
-
await full_node_api.process_transaction_records(records=[tx_record])
|
|
92
|
-
|
|
93
|
-
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
|
|
94
|
-
await time_out_assert(20, cat_wallet.get_spendable_balance, 100)
|
|
95
|
-
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
96
|
-
await time_out_assert(20, wallet.get_confirmed_balance, funds - 110)
|
|
97
|
-
await time_out_assert(20, wallet.get_spendable_balance, funds - 110)
|
|
98
|
-
await time_out_assert(20, wallet.get_unconfirmed_balance, funds - 110)
|
|
99
|
-
|
|
100
|
-
# Test migration
|
|
101
|
-
all_lineage = await cat_wallet.lineage_store.get_all_lineage_proofs()
|
|
102
|
-
current_info = cat_wallet.wallet_info
|
|
103
|
-
data_str = bytes(
|
|
104
|
-
LegacyCATInfo(
|
|
105
|
-
cat_wallet.cat_info.limitations_program_hash, cat_wallet.cat_info.my_tail, list(all_lineage.items())
|
|
106
|
-
)
|
|
107
|
-
).hex()
|
|
108
|
-
wallet_info = WalletInfo(current_info.id, current_info.name, current_info.type, data_str)
|
|
109
|
-
new_cat_wallet = await CATWallet.create(wallet_node.wallet_state_manager, wallet, wallet_info)
|
|
110
|
-
assert new_cat_wallet.cat_info.limitations_program_hash == cat_wallet.cat_info.limitations_program_hash
|
|
111
|
-
assert new_cat_wallet.cat_info.my_tail == cat_wallet.cat_info.my_tail
|
|
112
|
-
assert await cat_wallet.lineage_store.get_all_lineage_proofs() == all_lineage
|
|
113
|
-
|
|
114
|
-
height = full_node_api.full_node.blockchain.get_peak_height()
|
|
115
|
-
await full_node_api.reorg_from_index_to_new_index(
|
|
116
|
-
ReorgProtocol(height - num_blocks - 1, height + 1, 32 * b"1", None)
|
|
117
|
-
)
|
|
118
|
-
await time_out_assert(20, cat_wallet.get_confirmed_balance, 0)
|
|
119
|
-
|
|
120
|
-
@pytest.mark.anyio
|
|
121
|
-
async def test_cat_creation_unique_lineage_store(self, self_hostname, two_wallet_nodes):
|
|
122
|
-
num_blocks = 3
|
|
123
|
-
full_nodes, wallets, _ = two_wallet_nodes
|
|
124
|
-
full_node_api = full_nodes[0]
|
|
125
|
-
full_node_server = full_node_api.server
|
|
126
|
-
wallet_node, wallet_server = wallets[0]
|
|
127
|
-
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
128
|
-
ph = await wallet.get_new_puzzlehash()
|
|
44
|
+
|
|
45
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
46
|
+
@pytest.mark.anyio
|
|
47
|
+
async def test_cat_creation(self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool) -> None:
|
|
48
|
+
num_blocks = 3
|
|
49
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
50
|
+
full_node_api = full_nodes[0]
|
|
51
|
+
full_node_server = full_node_api.server
|
|
52
|
+
wallet_node, server_2 = wallets[0]
|
|
53
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
54
|
+
|
|
55
|
+
ph = await wallet.get_new_puzzlehash()
|
|
56
|
+
if trusted:
|
|
129
57
|
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
58
|
+
else:
|
|
59
|
+
wallet_node.config["trusted_peers"] = {}
|
|
130
60
|
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
61
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
62
|
+
for _ in range(num_blocks):
|
|
63
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
64
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
135
65
|
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
for i in range(1, num_blocks + 1)
|
|
140
|
-
]
|
|
141
|
-
)
|
|
66
|
+
funds = sum(
|
|
67
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
68
|
+
)
|
|
142
69
|
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
cat_wallet_1, _ = await CATWallet.create_new_cat_wallet(
|
|
148
|
-
wallet_node.wallet_state_manager,
|
|
149
|
-
wallet,
|
|
150
|
-
{"identifier": "genesis_by_id"},
|
|
151
|
-
uint64(100),
|
|
152
|
-
DEFAULT_TX_CONFIG,
|
|
153
|
-
)
|
|
154
|
-
cat_wallet_2, _ = await CATWallet.create_new_cat_wallet(
|
|
155
|
-
wallet_node.wallet_state_manager,
|
|
156
|
-
wallet,
|
|
157
|
-
{"identifier": "genesis_by_id"},
|
|
158
|
-
uint64(200),
|
|
159
|
-
DEFAULT_TX_CONFIG,
|
|
160
|
-
)
|
|
161
|
-
|
|
162
|
-
proofs_1 = await cat_wallet_1.lineage_store.get_all_lineage_proofs()
|
|
163
|
-
proofs_2 = await cat_wallet_2.lineage_store.get_all_lineage_proofs()
|
|
164
|
-
assert len(proofs_1) == len(proofs_2)
|
|
165
|
-
assert proofs_1 != proofs_2
|
|
166
|
-
assert cat_wallet_1.lineage_store.table_name != cat_wallet_2.lineage_store.table_name
|
|
167
|
-
|
|
168
|
-
@pytest.mark.parametrize(
|
|
169
|
-
"trusted",
|
|
170
|
-
[True, False],
|
|
70
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
71
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
72
|
+
|
|
73
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
171
|
-
cat_wallet,
|
|
74
|
++
cat_wallet, tx_records = await CATWallet.create_new_cat_wallet(
|
|
75
|
+
wallet_node.wallet_state_manager,
|
|
76
|
+
wallet,
|
|
77
|
+
{"identifier": "genesis_by_id"},
|
|
78
|
+
uint64(100),
|
|
79
|
+
DEFAULT_TX_CONFIG,
|
|
80
|
+
fee=uint64(10),
|
|
81
|
+
)
|
|
82
|
+
# The next 2 lines are basically a noop, it just adds test coverage
|
|
83
|
+
cat_wallet = await CATWallet.create(wallet_node.wallet_state_manager, wallet, cat_wallet.wallet_info)
|
|
84
|
+
await wallet_node.wallet_state_manager.add_new_wallet(cat_wallet)
|
|
85
|
+
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
await
|
|
86
|
++
await full_node_api.process_transaction_records(records=tx_records)
|
|
87
|
+
|
|
88
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
|
|
89
|
+
await time_out_assert(20, cat_wallet.get_spendable_balance, 100)
|
|
90
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
91
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds - 110)
|
|
92
|
+
await time_out_assert(20, wallet.get_spendable_balance, funds - 110)
|
|
93
|
+
await time_out_assert(20, wallet.get_unconfirmed_balance, funds - 110)
|
|
94
|
+
|
|
95
|
+
# Test migration
|
|
96
|
+
all_lineage = await cat_wallet.lineage_store.get_all_lineage_proofs()
|
|
97
|
+
current_info = cat_wallet.wallet_info
|
|
98
|
+
data_str = bytes(
|
|
99
|
+
LegacyCATInfo(
|
|
100
|
+
cat_wallet.cat_info.limitations_program_hash, cat_wallet.cat_info.my_tail, list(all_lineage.items())
|
|
101
|
+
)
|
|
102
|
+
).hex()
|
|
103
|
+
wallet_info = WalletInfo(current_info.id, current_info.name, current_info.type, data_str)
|
|
104
|
+
new_cat_wallet = await CATWallet.create(wallet_node.wallet_state_manager, wallet, wallet_info)
|
|
105
|
+
assert new_cat_wallet.cat_info.limitations_program_hash == cat_wallet.cat_info.limitations_program_hash
|
|
106
|
+
assert new_cat_wallet.cat_info.my_tail == cat_wallet.cat_info.my_tail
|
|
107
|
+
assert await cat_wallet.lineage_store.get_all_lineage_proofs() == all_lineage
|
|
108
|
+
|
|
109
|
+
height = full_node_api.full_node.blockchain.get_peak_height()
|
|
110
|
+
assert height is not None
|
|
111
|
+
await full_node_api.reorg_from_index_to_new_index(
|
|
112
|
+
ReorgProtocol(uint32(height - num_blocks - 1), uint32(height + 1), bytes32(32 * b"1"), None)
|
|
175
113
|
)
|
|
176
|
-
|
|
177
|
-
async def test_cat_spend(self, self_hostname, two_wallet_nodes, trusted):
|
|
178
|
-
num_blocks = 3
|
|
179
|
-
full_nodes, wallets, _ = two_wallet_nodes
|
|
180
|
-
full_node_api = full_nodes[0]
|
|
181
|
-
full_node_server = full_node_api.server
|
|
182
|
-
wallet_node, server_2 = wallets[0]
|
|
183
|
-
wallet_node_2, server_3 = wallets[1]
|
|
184
|
-
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
185
|
-
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
186
|
-
api_0 = WalletRpcApi(wallet_node)
|
|
187
|
-
api_1 = WalletRpcApi(wallet_node_2)
|
|
188
|
-
ph = await wallet.get_new_puzzlehash()
|
|
189
|
-
if trusted:
|
|
190
|
-
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
191
|
-
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
192
|
-
else:
|
|
193
|
-
wallet_node.config["trusted_peers"] = {}
|
|
194
|
-
wallet_node_2.config["trusted_peers"] = {}
|
|
195
|
-
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
196
|
-
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
197
|
-
|
|
198
|
-
for i in range(0, num_blocks):
|
|
199
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
200
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
|
|
201
|
-
|
|
202
|
-
funds = sum(
|
|
203
|
-
[
|
|
204
|
-
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
|
|
205
|
-
for i in range(1, num_blocks + 1)
|
|
206
|
-
]
|
|
207
|
-
)
|
|
114
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 0)
|
|
208
115
|
|
|
209
|
-
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
210
116
|
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
117
|
+
@pytest.mark.anyio
|
|
118
|
+
async def test_cat_creation_unique_lineage_store(self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets) -> None:
|
|
119
|
+
num_blocks = 3
|
|
120
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
121
|
+
full_node_api = full_nodes[0]
|
|
122
|
+
full_node_server = full_node_api.server
|
|
123
|
+
wallet_node, wallet_server = wallets[0]
|
|
124
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
125
|
+
ph = await wallet.get_new_puzzlehash()
|
|
126
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
127
|
+
|
|
128
|
+
await wallet_server.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
129
|
+
for _ in range(num_blocks):
|
|
130
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
131
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
222
132
|
|
|
223
|
-
|
|
224
|
-
|
|
133
|
+
funds = sum(
|
|
134
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
135
|
+
)
|
|
225
136
|
|
|
226
|
-
|
|
227
|
-
|
|
137
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
138
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
228
139
|
|
|
229
|
-
|
|
230
|
-
|
|
140
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
141
|
+
cat_wallet_1, _ = await CATWallet.create_new_cat_wallet(
|
|
231
|
-
wallet_node.wallet_state_manager,
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
uint64(100),
|
|
235
|
-
DEFAULT_TX_CONFIG,
|
|
142
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100), DEFAULT_TX_CONFIG
|
|
143
|
+
)
|
|
144
|
+
cat_wallet_2, _ = await CATWallet.create_new_cat_wallet(
|
|
236
|
-
wallet_node.wallet_state_manager,
|
|
237
|
-
wallet,
|
|
238
|
-
{"identifier": "genesis_by_id"},
|
|
239
|
-
uint64(200),
|
|
240
|
-
DEFAULT_TX_CONFIG,
|
|
145
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(200), DEFAULT_TX_CONFIG
|
|
241
146
|
)
|
|
242
147
|
|
|
243
|
-
|
|
148
|
+
proofs_1 = await cat_wallet_1.lineage_store.get_all_lineage_proofs()
|
|
149
|
+
proofs_2 = await cat_wallet_2.lineage_store.get_all_lineage_proofs()
|
|
150
|
+
assert len(proofs_1) == len(proofs_2)
|
|
151
|
+
assert proofs_1 != proofs_2
|
|
152
|
+
assert cat_wallet_1.lineage_store.table_name != cat_wallet_2.lineage_store.table_name
|
|
244
153
|
|
|
245
|
-
cat_2_hash = await cat_wallet_2.get_new_inner_hash()
|
|
246
|
-
tx_records = await cat_wallet.generate_signed_transaction(
|
|
247
|
-
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG, fee=uint64(1)
|
|
248
|
-
)
|
|
249
|
-
tx_id = None
|
|
250
|
-
await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
251
|
-
for tx_record in tx_records:
|
|
252
|
-
if tx_record.wallet_id is cat_wallet.id():
|
|
253
|
-
tx_id = tx_record.name.hex()
|
|
254
|
-
assert tx_record.to_puzzle_hash == cat_2_hash
|
|
255
|
-
|
|
256
|
-
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records)
|
|
257
|
-
|
|
258
|
-
await time_out_assert(20, cat_wallet.get_pending_change_balance, 40)
|
|
259
|
-
memos = await api_0.get_transaction_memo(dict(transaction_id=tx_id))
|
|
260
|
-
assert len(memos[tx_id]) == 2 # One for tx, one for change
|
|
261
|
-
assert list(memos[tx_id].values())[0][0] == cat_2_hash.hex()
|
|
262
|
-
|
|
263
|
-
for i in range(1, num_blocks):
|
|
264
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
|
|
265
|
-
|
|
266
|
-
await time_out_assert(30, wallet.get_confirmed_balance, funds - 101)
|
|
267
|
-
|
|
268
|
-
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
269
|
-
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
|
|
270
|
-
|
|
271
|
-
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 60)
|
|
272
|
-
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
273
|
-
coins = await cat_wallet_2.select_coins(uint64(60), DEFAULT_COIN_SELECTION_CONFIG)
|
|
274
|
-
assert len(coins) == 1
|
|
275
|
-
coin = coins.pop()
|
|
276
|
-
tx_id = coin.name().hex()
|
|
277
|
-
memos = await api_1.get_transaction_memo(dict(transaction_id=tx_id))
|
|
278
|
-
assert len(memos[tx_id]) == 2
|
|
279
|
-
assert list(memos[tx_id].values())[0][0] == cat_2_hash.hex()
|
|
280
|
-
cat_hash = await cat_wallet.get_new_inner_hash()
|
|
281
|
-
tx_records = await cat_wallet_2.generate_signed_transaction([uint64(15)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
282
|
-
await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
283
|
-
|
|
284
|
-
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records)
|
|
285
154
|
|
|
155
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
156
|
+
@pytest.mark.anyio
|
|
157
|
+
async def test_cat_spend(self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool) -> None:
|
|
158
|
+
num_blocks = 3
|
|
159
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
160
|
+
full_node_api = full_nodes[0]
|
|
161
|
+
full_node_server = full_node_api.server
|
|
162
|
+
wallet_node, server_2 = wallets[0]
|
|
163
|
+
wallet_node_2, server_3 = wallets[1]
|
|
164
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
165
|
+
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
166
|
+
api_0 = WalletRpcApi(wallet_node)
|
|
167
|
+
api_1 = WalletRpcApi(wallet_node_2)
|
|
168
|
+
ph = await wallet.get_new_puzzlehash()
|
|
169
|
+
if trusted:
|
|
170
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
171
|
+
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
172
|
+
else:
|
|
173
|
+
wallet_node.config["trusted_peers"] = {}
|
|
174
|
+
wallet_node_2.config["trusted_peers"] = {}
|
|
175
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
176
|
+
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
177
|
+
|
|
178
|
+
for _ in range(num_blocks):
|
|
286
179
|
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
180
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
287
181
|
|
|
288
|
-
|
|
289
|
-
|
|
182
|
+
funds = sum(
|
|
183
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
184
|
+
)
|
|
290
185
|
|
|
291
|
-
|
|
292
|
-
await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(height - 1, height + 1, 32 * b"1", None))
|
|
293
|
-
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
186
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
294
187
|
|
|
295
|
-
|
|
296
|
-
"trusted",
|
|
297
|
-
[True, False],
|
|
298
|
-
)
|
|
299
|
-
@pytest.mark.anyio
|
|
300
|
-
async def test_cat_reuse_address(self, self_hostname, two_wallet_nodes, trusted):
|
|
301
|
-
num_blocks = 3
|
|
302
|
-
full_nodes, wallets, _ = two_wallet_nodes
|
|
303
|
-
full_node_api = full_nodes[0]
|
|
304
|
-
full_node_server = full_node_api.server
|
|
305
|
-
wallet_node, server_2 = wallets[0]
|
|
306
|
-
wallet_node_2, server_3 = wallets[1]
|
|
307
|
-
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
308
|
-
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
309
|
-
|
|
310
|
-
ph = await wallet.get_new_puzzlehash()
|
|
311
|
-
if trusted:
|
|
312
|
-
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
313
|
-
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
314
|
-
else:
|
|
315
|
-
wallet_node.config["trusted_peers"] = {}
|
|
316
|
-
wallet_node_2.config["trusted_peers"] = {}
|
|
317
|
-
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
318
|
-
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
319
|
-
|
|
320
|
-
for i in range(0, num_blocks):
|
|
321
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
322
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
|
|
323
|
-
|
|
324
|
-
funds = sum(
|
|
325
|
-
[
|
|
326
|
-
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
|
|
327
|
-
for i in range(1, num_blocks + 1)
|
|
328
|
-
]
|
|
188
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
329
|
-
cat_wallet,
|
|
330
|
-
wallet_node.wallet_state_manager,
|
|
331
|
-
wallet,
|
|
332
|
-
{"identifier": "genesis_by_id"},
|
|
333
|
-
uint64(100),
|
|
334
|
-
DEFAULT_TX_CONFIG,
|
|
189
|
++
cat_wallet, tx_records = await CATWallet.create_new_cat_wallet(
|
|
190
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100), DEFAULT_TX_CONFIG
|
|
335
191
|
)
|
|
336
|
-
|
|
337
|
-
tx_record = tx_queue[0]
|
|
338
|
-
await full_node_api.process_transaction_records(records=[tx_record])
|
|
192
|
++
await full_node_api.process_transaction_records(records=tx_records)
|
|
339
193
|
|
|
340
|
-
|
|
194
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
|
|
195
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
341
196
|
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
wallet_node.wallet_state_manager,
|
|
345
|
-
wallet,
|
|
346
|
-
{"identifier": "genesis_by_id"},
|
|
347
|
-
uint64(100),
|
|
348
|
-
DEFAULT_TX_CONFIG,
|
|
349
|
-
)
|
|
350
|
-
tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
|
|
351
|
-
tx_record = tx_queue[0]
|
|
352
|
-
await full_node_api.process_transaction_records(records=[tx_record])
|
|
197
|
+
assert cat_wallet.cat_info.limitations_program_hash is not None
|
|
198
|
+
asset_id = cat_wallet.get_asset_id()
|
|
353
199
|
|
|
354
|
-
|
|
355
|
-
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
200
|
+
cat_wallet_2 = await CATWallet.get_or_create_wallet_for_cat(wallet_node_2.wallet_state_manager, wallet2, asset_id)
|
|
356
201
|
|
|
357
|
-
|
|
358
|
-
|
|
202
|
+
assert cat_wallet.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash
|
|
203
|
+
|
|
204
|
+
cat_2_hash = await cat_wallet_2.get_new_inner_hash()
|
|
205
|
+
tx_records = await cat_wallet.generate_signed_transaction(
|
|
206
|
+
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG, fee=uint64(1)
|
|
207
|
+
)
|
|
208
|
+
tx_id = None
|
|
209
|
+
tx_records = await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
210
|
+
for tx_record in tx_records:
|
|
211
|
+
if tx_record.wallet_id is cat_wallet.id():
|
|
212
|
+
tx_id = tx_record.name.hex()
|
|
213
|
+
assert tx_record.to_puzzle_hash == cat_2_hash
|
|
214
|
+
|
|
215
|
+
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records)
|
|
216
|
+
|
|
217
|
+
await time_out_assert(20, cat_wallet.get_pending_change_balance, 40)
|
|
218
|
+
assert tx_id is not None
|
|
219
|
+
memos = await api_0.get_transaction_memo({"transaction_id": tx_id})
|
|
220
|
+
assert len(memos[tx_id]) == 2 # One for tx, one for change
|
|
221
|
+
assert list(memos[tx_id].values())[0][0] == cat_2_hash.hex()
|
|
222
|
+
|
|
223
|
+
for _ in range(1, num_blocks):
|
|
224
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"\0")))
|
|
225
|
+
|
|
226
|
+
await time_out_assert(30, wallet.get_confirmed_balance, funds - 101)
|
|
227
|
+
|
|
228
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
229
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
|
|
230
|
+
|
|
231
|
+
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 60)
|
|
232
|
+
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
233
|
+
coins = await cat_wallet_2.select_coins(uint64(60), DEFAULT_COIN_SELECTION_CONFIG)
|
|
234
|
+
assert len(coins) == 1
|
|
235
|
+
coin = coins.pop()
|
|
236
|
+
tx_id = coin.name().hex()
|
|
237
|
+
memos = await api_1.get_transaction_memo(dict(transaction_id=tx_id))
|
|
238
|
+
assert len(memos[tx_id]) == 2
|
|
239
|
+
assert list(memos[tx_id].values())[0][0] == cat_2_hash.hex()
|
|
240
|
+
cat_hash = await cat_wallet.get_new_inner_hash()
|
|
241
|
+
tx_records = await cat_wallet_2.generate_signed_transaction([uint64(15)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
242
|
+
tx_records = await wallet2.wallet_state_manager.add_pending_transactions(tx_records)
|
|
243
|
+
|
|
244
|
+
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records)
|
|
245
|
+
|
|
246
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
247
|
+
|
|
248
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 55)
|
|
249
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 55)
|
|
250
|
+
|
|
251
|
+
height = full_node_api.full_node.blockchain.get_peak_height()
|
|
252
|
+
assert height is not None
|
|
253
|
+
await full_node_api.reorg_from_index_to_new_index(
|
|
254
|
+
ReorgProtocol(uint32(height - 1), uint32(height + 1), bytes32(32 * b"1"), None)
|
|
255
|
+
)
|
|
256
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
359
257
|
|
|
360
|
-
cat_wallet_2: CATWallet = await CATWallet.get_or_create_wallet_for_cat(
|
|
361
|
-
wallet_node_2.wallet_state_manager, wallet2, asset_id
|
|
362
|
-
)
|
|
363
258
|
|
|
364
|
-
|
|
259
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
260
|
+
@pytest.mark.anyio
|
|
261
|
+
async def test_cat_reuse_address(self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool) -> None:
|
|
262
|
+
num_blocks = 3
|
|
263
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
264
|
+
full_node_api = full_nodes[0]
|
|
265
|
+
full_node_server = full_node_api.server
|
|
266
|
+
wallet_node, server_2 = wallets[0]
|
|
267
|
+
wallet_node_2, server_3 = wallets[1]
|
|
268
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
269
|
+
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
270
|
+
|
|
271
|
+
ph = await wallet.get_new_puzzlehash()
|
|
272
|
+
if trusted:
|
|
273
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
274
|
+
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
275
|
+
else:
|
|
276
|
+
wallet_node.config["trusted_peers"] = {}
|
|
277
|
+
wallet_node_2.config["trusted_peers"] = {}
|
|
278
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
279
|
+
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
280
|
+
|
|
281
|
+
for _ in range(num_blocks):
|
|
282
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
283
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
284
|
+
|
|
285
|
+
funds = sum(
|
|
286
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
287
|
+
)
|
|
288
|
+
|
|
289
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
365
290
|
|
|
366
|
-
|
|
367
|
-
tx_records = await cat_wallet.generate_signed_transaction(
|
|
368
|
-
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG.override(reuse_puzhash=True), fee=uint64(1)
|
|
291
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
369
|
-
cat_wallet,
|
|
370
|
-
wallet_node.wallet_state_manager,
|
|
371
|
-
wallet,
|
|
372
|
-
{"identifier": "genesis_by_id"},
|
|
373
|
-
uint64(100),
|
|
374
|
-
DEFAULT_TX_CONFIG,
|
|
292
|
++
cat_wallet, tx_records = await CATWallet.create_new_cat_wallet(
|
|
293
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100), DEFAULT_TX_CONFIG
|
|
375
294
|
)
|
|
376
|
-
|
|
377
|
-
tx_record = tx_queue[0]
|
|
378
|
-
await full_node_api.process_transaction_records(records=[tx_record])
|
|
379
|
-
await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
380
|
-
for tx_record in tx_records:
|
|
381
|
-
if tx_record.wallet_id is cat_wallet.id():
|
|
382
|
-
assert tx_record.to_puzzle_hash == cat_2_hash
|
|
383
|
-
assert len(tx_record.spend_bundle.coin_spends) == 2
|
|
384
|
-
for cs in tx_record.spend_bundle.coin_spends:
|
|
385
|
-
if cs.coin.amount == 100:
|
|
386
|
-
old_puzhash = cs.coin.puzzle_hash.hex()
|
|
387
|
-
new_puzhash = [c.puzzle_hash.hex() for c in tx_record.additions]
|
|
388
|
-
assert old_puzhash in new_puzhash
|
|
295
|
++
await full_node_api.process_transaction_records(records=tx_records)
|
|
389
296
|
|
|
390
|
-
|
|
297
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
|
|
298
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
391
299
|
|
|
392
|
-
|
|
300
|
+
assert cat_wallet.cat_info.limitations_program_hash is not None
|
|
301
|
+
asset_id = cat_wallet.get_asset_id()
|
|
393
302
|
|
|
394
|
-
|
|
395
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
|
|
303
|
+
cat_wallet_2 = await CATWallet.get_or_create_wallet_for_cat(wallet_node_2.wallet_state_manager, wallet2, asset_id)
|
|
396
304
|
|
|
397
|
-
|
|
305
|
+
assert cat_wallet.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash
|
|
398
306
|
|
|
399
|
-
|
|
400
|
-
|
|
307
|
+
cat_2_hash = await cat_wallet_2.get_new_inner_hash()
|
|
308
|
+
tx_records = await cat_wallet.generate_signed_transaction(
|
|
309
|
+
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG.override(reuse_puzhash=True), fee=uint64(1)
|
|
310
|
+
)
|
|
311
|
+
tx_records = await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
312
|
+
for tx_record in tx_records:
|
|
313
|
+
if tx_record.wallet_id is cat_wallet.id():
|
|
314
|
+
assert tx_record.to_puzzle_hash == cat_2_hash
|
|
315
|
+
assert tx_record.spend_bundle is not None
|
|
316
|
+
assert len(tx_record.spend_bundle.coin_spends) == 2
|
|
317
|
+
for cs in tx_record.spend_bundle.coin_spends:
|
|
318
|
+
if cs.coin.amount == 100:
|
|
319
|
+
old_puzhash = cs.coin.puzzle_hash.hex()
|
|
320
|
+
new_puzhash = [c.puzzle_hash.hex() for c in tx_record.additions]
|
|
321
|
+
assert old_puzhash in new_puzhash
|
|
401
322
|
|
|
402
|
-
|
|
403
|
-
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
323
|
+
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records)
|
|
404
324
|
|
|
405
|
-
|
|
406
|
-
tx_records = await cat_wallet_2.generate_signed_transaction([uint64(15)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
407
|
-
await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
325
|
+
await time_out_assert(20, cat_wallet.get_pending_change_balance, 40)
|
|
408
326
|
|
|
409
|
-
|
|
327
|
+
for _ in range(1, num_blocks):
|
|
328
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"\0")))
|
|
410
329
|
|
|
411
|
-
|
|
330
|
+
await time_out_assert(30, wallet.get_confirmed_balance, funds - 101)
|
|
412
331
|
|
|
413
|
-
|
|
414
|
-
|
|
332
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
333
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
|
|
415
334
|
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
335
|
+
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 60)
|
|
336
|
+
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
419
337
|
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
338
|
+
cat_hash = await cat_wallet.get_new_inner_hash()
|
|
339
|
+
tx_records = await cat_wallet_2.generate_signed_transaction([uint64(15)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
340
|
+
tx_records = await wallet2.wallet_state_manager.add_pending_transactions(tx_records)
|
|
341
|
+
|
|
342
|
+
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records)
|
|
343
|
+
|
|
344
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
345
|
+
|
|
346
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 55)
|
|
347
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 55)
|
|
348
|
+
|
|
349
|
+
height = full_node_api.full_node.blockchain.get_peak_height()
|
|
350
|
+
assert height is not None
|
|
351
|
+
await full_node_api.reorg_from_index_to_new_index(
|
|
352
|
+
ReorgProtocol(uint32(height - 1), uint32(height + 1), bytes32(32 * b"1"), None)
|
|
423
353
|
)
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
)
|
|
354
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
358
|
+
@pytest.mark.anyio
|
|
359
|
+
async def test_get_wallet_for_asset_id(
|
|
360
|
+
self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool
|
|
361
|
+
) -> None:
|
|
362
|
+
num_blocks = 3
|
|
363
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
364
|
+
full_node_api = full_nodes[0]
|
|
365
|
+
full_node_server = full_node_api.server
|
|
366
|
+
wallet_node, server_2 = wallets[0]
|
|
367
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
368
|
+
|
|
369
|
+
ph = await wallet.get_new_puzzlehash()
|
|
370
|
+
if trusted:
|
|
371
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
372
|
+
else:
|
|
373
|
+
wallet_node.config["trusted_peers"] = {}
|
|
374
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
375
|
+
|
|
376
|
+
for _ in range(num_blocks):
|
|
377
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
378
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
450
379
|
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
async with wallet_node.wallet_state_manager.lock:
|
|
454
|
-
cat_wallet, _ = await CATWallet.create_new_cat_wallet(
|
|
455
|
-
wallet_node.wallet_state_manager,
|
|
456
|
-
wallet,
|
|
457
|
-
{"identifier": "genesis_by_id"},
|
|
458
|
-
uint64(100),
|
|
459
|
-
DEFAULT_TX_CONFIG,
|
|
460
|
-
)
|
|
461
|
-
|
|
462
|
-
for i in range(1, num_blocks):
|
|
463
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
|
|
464
|
-
|
|
465
|
-
asset_id = cat_wallet.get_asset_id()
|
|
466
|
-
await cat_wallet.set_tail_program(bytes(cat_wallet.cat_info.my_tail).hex())
|
|
467
|
-
assert await wallet_node.wallet_state_manager.get_wallet_for_asset_id(asset_id) == cat_wallet
|
|
468
|
-
|
|
469
|
-
# Test that the a default CAT will initialize correctly
|
|
470
|
-
asset = DEFAULT_CATS[next(iter(DEFAULT_CATS))]
|
|
471
|
-
asset_id = asset["asset_id"]
|
|
472
|
-
cat_wallet_2 = await CATWallet.get_or_create_wallet_for_cat(wallet_node.wallet_state_manager, wallet, asset_id)
|
|
473
|
-
assert cat_wallet_2.get_name() == asset["name"]
|
|
474
|
-
await cat_wallet_2.set_name("Test Name")
|
|
475
|
-
assert cat_wallet_2.get_name() == "Test Name"
|
|
476
|
-
|
|
477
|
-
@pytest.mark.parametrize(
|
|
478
|
-
"trusted",
|
|
479
|
-
[True, False],
|
|
380
|
+
funds = sum(
|
|
381
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
480
382
|
)
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
full_node_server = full_node_api.server
|
|
487
|
-
wallet_node, server_2 = wallets[0]
|
|
488
|
-
wallet_node_2, server_3 = wallets[1]
|
|
489
|
-
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
490
|
-
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
491
|
-
|
|
492
|
-
ph = await wallet.get_new_puzzlehash()
|
|
493
|
-
if trusted:
|
|
494
|
-
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
495
|
-
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
496
|
-
else:
|
|
497
|
-
wallet_node.config["trusted_peers"] = {}
|
|
498
|
-
wallet_node_2.config["trusted_peers"] = {}
|
|
499
|
-
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
500
|
-
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
501
|
-
|
|
502
|
-
for i in range(0, num_blocks):
|
|
503
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
504
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
|
|
505
|
-
|
|
506
|
-
funds = sum(
|
|
507
|
-
[
|
|
508
|
-
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
|
|
509
|
-
for i in range(1, num_blocks + 1)
|
|
510
|
-
]
|
|
383
|
+
|
|
384
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
385
|
+
|
|
386
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
387
|
+
cat_wallet, _ = await CATWallet.create_new_cat_wallet(
|
|
511
|
-
wallet_node.wallet_state_manager,
|
|
512
|
-
wallet,
|
|
513
|
-
{"identifier": "genesis_by_id"},
|
|
514
|
-
uint64(100),
|
|
515
|
-
DEFAULT_TX_CONFIG,
|
|
388
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100), DEFAULT_TX_CONFIG
|
|
516
389
|
)
|
|
517
390
|
|
|
518
|
-
|
|
391
|
+
for _ in range(1, num_blocks):
|
|
392
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
519
393
|
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
{"identifier": "genesis_by_id"},
|
|
525
|
-
uint64(100),
|
|
526
|
-
DEFAULT_TX_CONFIG,
|
|
527
|
-
)
|
|
528
|
-
tx_records: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
|
|
529
|
-
await full_node_api.process_transaction_records(records=tx_records)
|
|
394
|
+
asset_id = cat_wallet.get_asset_id()
|
|
395
|
+
assert cat_wallet.cat_info.my_tail is not None
|
|
396
|
+
await cat_wallet.set_tail_program(bytes(cat_wallet.cat_info.my_tail).hex())
|
|
397
|
+
assert await wallet_node.wallet_state_manager.get_wallet_for_asset_id(asset_id) == cat_wallet
|
|
530
398
|
|
|
531
|
-
|
|
532
|
-
|
|
399
|
+
# Test that the a default CAT will initialize correctly
|
|
400
|
+
asset = DEFAULT_CATS[next(iter(DEFAULT_CATS))]
|
|
401
|
+
asset_id = asset["asset_id"]
|
|
402
|
+
cat_wallet_2 = await CATWallet.get_or_create_wallet_for_cat(wallet_node.wallet_state_manager, wallet, asset_id)
|
|
403
|
+
assert cat_wallet_2.get_name() == asset["name"]
|
|
404
|
+
await cat_wallet_2.set_name("Test Name")
|
|
405
|
+
assert cat_wallet_2.get_name() == "Test Name"
|
|
533
406
|
|
|
534
|
-
assert cat_wallet.cat_info.limitations_program_hash is not None
|
|
535
|
-
asset_id = cat_wallet.get_asset_id()
|
|
536
407
|
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
408
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
409
|
+
@pytest.mark.anyio
|
|
410
|
+
async def test_cat_doesnt_see_eve(self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool) -> None:
|
|
411
|
+
num_blocks = 3
|
|
412
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
413
|
+
full_node_api = full_nodes[0]
|
|
414
|
+
full_node_server = full_node_api.server
|
|
415
|
+
wallet_node, server_2 = wallets[0]
|
|
416
|
+
wallet_node_2, server_3 = wallets[1]
|
|
417
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
418
|
+
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
419
|
+
|
|
420
|
+
ph = await wallet.get_new_puzzlehash()
|
|
421
|
+
if trusted:
|
|
422
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
423
|
+
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
424
|
+
else:
|
|
425
|
+
wallet_node.config["trusted_peers"] = {}
|
|
426
|
+
wallet_node_2.config["trusted_peers"] = {}
|
|
427
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
428
|
+
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
429
|
+
|
|
430
|
+
for _ in range(num_blocks):
|
|
431
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
432
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
433
|
+
|
|
434
|
+
funds = sum(
|
|
435
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
436
|
+
)
|
|
540
437
|
|
|
541
|
-
|
|
438
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
542
439
|
|
|
543
|
-
|
|
544
|
-
tx_records = await cat_wallet.generate_signed_transaction(
|
|
545
|
-
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG, fee=uint64(1)
|
|
440
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
546
|
-
cat_wallet,
|
|
547
|
-
wallet_node.wallet_state_manager,
|
|
548
|
-
wallet,
|
|
549
|
-
{"identifier": "genesis_by_id"},
|
|
550
|
-
uint64(100),
|
|
551
|
-
DEFAULT_TX_CONFIG,
|
|
441
|
++
cat_wallet, tx_records = await CATWallet.create_new_cat_wallet(
|
|
442
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100), DEFAULT_TX_CONFIG
|
|
552
443
|
)
|
|
553
|
-
|
|
554
|
-
await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
555
|
-
await full_node_api.process_transaction_records(records=tx_records)
|
|
444
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
556
445
|
|
|
557
|
-
|
|
558
|
-
|
|
446
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
|
|
447
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
559
448
|
|
|
560
|
-
|
|
561
|
-
|
|
449
|
+
assert cat_wallet.cat_info.limitations_program_hash is not None
|
|
450
|
+
asset_id = cat_wallet.get_asset_id()
|
|
562
451
|
|
|
563
|
-
|
|
564
|
-
await time_out_assert(20, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
452
|
+
cat_wallet_2 = await CATWallet.get_or_create_wallet_for_cat(wallet_node_2.wallet_state_manager, wallet2, asset_id)
|
|
565
453
|
|
|
566
|
-
|
|
567
|
-
[tx_record] = await wallet.wallet_state_manager.main_wallet.generate_signed_transaction(
|
|
568
|
-
10, cc2_ph, DEFAULT_TX_CONFIG, 0
|
|
569
|
-
)
|
|
570
|
-
await wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
571
|
-
await full_node_api.process_transaction_records(records=[tx_record])
|
|
454
|
+
assert cat_wallet.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash
|
|
572
455
|
|
|
573
|
-
|
|
574
|
-
|
|
456
|
+
cat_2_hash = await cat_wallet_2.get_new_inner_hash()
|
|
457
|
+
tx_records = await cat_wallet.generate_signed_transaction(
|
|
458
|
+
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG, fee=uint64(1)
|
|
459
|
+
)
|
|
460
|
+
tx_records = await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
461
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
462
|
+
|
|
463
|
+
await time_out_assert(30, wallet.get_confirmed_balance, funds - 101)
|
|
464
|
+
await time_out_assert(30, wallet.get_unconfirmed_balance, funds - 101)
|
|
575
465
|
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
return len(list(filter(lambda tx: tx.amount == 10, all_txs)))
|
|
466
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
467
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
|
|
579
468
|
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
await time_out_assert(20, cat_wallet_2.get_confirmed_balance, 60)
|
|
583
|
-
await time_out_assert(20, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
469
|
+
await time_out_assert(20, cat_wallet_2.get_confirmed_balance, 60)
|
|
470
|
+
await time_out_assert(20, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
584
471
|
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
472
|
+
cc2_ph = await cat_wallet_2.get_new_cat_puzzle_hash()
|
|
473
|
+
[tx_record] = await wallet.wallet_state_manager.main_wallet.generate_signed_transaction(
|
|
474
|
+
uint64(10), cc2_ph, DEFAULT_TX_CONFIG
|
|
588
475
|
)
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
num_blocks = 3
|
|
592
|
-
full_nodes, wallets, _ = three_wallet_nodes
|
|
593
|
-
full_node_api = full_nodes[0]
|
|
594
|
-
full_node_server = full_node_api.server
|
|
595
|
-
wallet_node_0, wallet_server_0 = wallets[0]
|
|
596
|
-
wallet_node_1, wallet_server_1 = wallets[1]
|
|
597
|
-
wallet_node_2, wallet_server_2 = wallets[2]
|
|
598
|
-
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
|
|
599
|
-
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
|
|
600
|
-
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
601
|
-
|
|
602
|
-
ph = await wallet_0.get_new_puzzlehash()
|
|
603
|
-
if trusted:
|
|
604
|
-
wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
605
|
-
wallet_node_1.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
606
|
-
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
607
|
-
else:
|
|
608
|
-
wallet_node_0.config["trusted_peers"] = {}
|
|
609
|
-
wallet_node_1.config["trusted_peers"] = {}
|
|
610
|
-
wallet_node_2.config["trusted_peers"] = {}
|
|
611
|
-
await wallet_server_0.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
612
|
-
await wallet_server_1.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
613
|
-
await wallet_server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
614
|
-
|
|
615
|
-
for i in range(0, num_blocks):
|
|
616
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
617
|
-
|
|
618
|
-
funds = sum(
|
|
619
|
-
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
|
|
620
|
-
)
|
|
476
|
+
[tx_record] = await wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
477
|
+
await full_node_api.process_transaction_records(records=[tx_record])
|
|
621
478
|
|
|
622
|
-
|
|
479
|
+
id = cat_wallet_2.id()
|
|
480
|
+
wsm = cat_wallet_2.wallet_state_manager
|
|
623
481
|
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
wallet_0,
|
|
628
|
-
{"identifier": "genesis_by_id"},
|
|
629
|
-
uint64(100),
|
|
630
|
-
DEFAULT_TX_CONFIG,
|
|
631
|
-
)
|
|
632
|
-
tx_records: List[TransactionRecord] = await wallet_node_0.wallet_state_manager.tx_store.get_not_sent()
|
|
633
|
-
await full_node_api.process_transaction_records(records=tx_records)
|
|
482
|
+
async def query_and_assert_transactions(wsm: WalletStateManager, id: uint32) -> int:
|
|
483
|
+
all_txs = await wsm.tx_store.get_all_transactions_for_wallet(id)
|
|
484
|
+
return len(list(filter(lambda tx: tx.amount == 10, all_txs)))
|
|
634
485
|
|
|
635
|
-
|
|
636
|
-
|
|
486
|
+
await time_out_assert(20, query_and_assert_transactions, 0, wsm, id)
|
|
487
|
+
await time_out_assert(20, wsm.get_confirmed_balance_for_wallet, 60, id)
|
|
488
|
+
await time_out_assert(20, cat_wallet_2.get_confirmed_balance, 60)
|
|
489
|
+
await time_out_assert(20, cat_wallet_2.get_unconfirmed_balance, 60)
|
|
637
490
|
|
|
638
|
-
assert cat_wallet_0.cat_info.limitations_program_hash is not None
|
|
639
|
-
asset_id = cat_wallet_0.get_asset_id()
|
|
640
491
|
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
492
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
493
|
+
@pytest.mark.anyio
|
|
494
|
+
async def test_cat_spend_multiple(
|
|
495
|
+
self_hostname: str, three_wallet_nodes: OldSimulatorsAndWallets, trusted: bool
|
|
496
|
+
) -> None:
|
|
497
|
+
num_blocks = 3
|
|
498
|
+
full_nodes, wallets, _ = three_wallet_nodes
|
|
499
|
+
full_node_api = full_nodes[0]
|
|
500
|
+
full_node_server = full_node_api.server
|
|
501
|
+
wallet_node_0, wallet_server_0 = wallets[0]
|
|
502
|
+
wallet_node_1, wallet_server_1 = wallets[1]
|
|
503
|
+
wallet_node_2, wallet_server_2 = wallets[2]
|
|
504
|
+
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
|
|
505
|
+
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
|
|
506
|
+
wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
507
|
+
|
|
508
|
+
ph = await wallet_0.get_new_puzzlehash()
|
|
509
|
+
if trusted:
|
|
510
|
+
wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
511
|
+
wallet_node_1.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
512
|
+
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
513
|
+
else:
|
|
514
|
+
wallet_node_0.config["trusted_peers"] = {}
|
|
515
|
+
wallet_node_1.config["trusted_peers"] = {}
|
|
516
|
+
wallet_node_2.config["trusted_peers"] = {}
|
|
517
|
+
await wallet_server_0.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
518
|
+
await wallet_server_1.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
519
|
+
await wallet_server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
520
|
+
|
|
521
|
+
for _ in range(num_blocks):
|
|
522
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
523
|
+
|
|
524
|
+
funds = sum(
|
|
525
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
|
|
526
|
+
)
|
|
644
527
|
|
|
645
|
-
|
|
646
|
-
|
|
528
|
+
await time_out_assert(20, wallet_0.get_confirmed_balance, funds)
|
|
529
|
+
|
|
530
|
+
async with wallet_node_0.wallet_state_manager.lock:
|
|
647
|
-
cat_wallet_0,
|
|
531
|
++
cat_wallet_0, tx_records = await CATWallet.create_new_cat_wallet(
|
|
532
|
+
wallet_node_0.wallet_state_manager,
|
|
533
|
+
wallet_0,
|
|
534
|
+
{"identifier": "genesis_by_id"},
|
|
535
|
+
uint64(100),
|
|
536
|
+
DEFAULT_TX_CONFIG,
|
|
648
537
|
)
|
|
649
|
-
|
|
538
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
650
539
|
|
|
651
|
-
|
|
652
|
-
|
|
540
|
+
await time_out_assert(20, cat_wallet_0.get_confirmed_balance, 100)
|
|
541
|
+
await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 100)
|
|
653
542
|
|
|
654
|
-
|
|
655
|
-
|
|
543
|
+
assert cat_wallet_0.cat_info.limitations_program_hash is not None
|
|
544
|
+
asset_id = cat_wallet_0.get_asset_id()
|
|
656
545
|
|
|
657
|
-
|
|
658
|
-
[uint64(60), uint64(20)], [cat_1_hash, cat_2_hash], DEFAULT_TX_CONFIG
|
|
659
|
-
)
|
|
660
|
-
await wallet_0.wallet_state_manager.add_pending_transactions(tx_records)
|
|
661
|
-
await full_node_api.process_transaction_records(records=tx_records)
|
|
546
|
+
cat_wallet_1 = await CATWallet.get_or_create_wallet_for_cat(wallet_node_1.wallet_state_manager, wallet_1, asset_id)
|
|
662
547
|
|
|
663
|
-
|
|
664
|
-
await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 20)
|
|
548
|
+
cat_wallet_2 = await CATWallet.get_or_create_wallet_for_cat(wallet_node_2.wallet_state_manager, wallet_2, asset_id)
|
|
665
549
|
|
|
666
|
-
|
|
667
|
-
|
|
550
|
+
assert cat_wallet_0.cat_info.limitations_program_hash == cat_wallet_1.cat_info.limitations_program_hash
|
|
551
|
+
assert cat_wallet_0.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash
|
|
668
552
|
|
|
669
|
-
|
|
670
|
-
|
|
553
|
+
cat_1_hash = await cat_wallet_1.get_new_inner_hash()
|
|
554
|
+
cat_2_hash = await cat_wallet_2.get_new_inner_hash()
|
|
671
555
|
|
|
672
|
-
|
|
556
|
+
tx_records = await cat_wallet_0.generate_signed_transaction(
|
|
557
|
+
[uint64(60), uint64(20)], [cat_1_hash, cat_2_hash], DEFAULT_TX_CONFIG
|
|
558
|
+
)
|
|
559
|
+
tx_records = await wallet_0.wallet_state_manager.add_pending_transactions(tx_records)
|
|
560
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
561
|
+
|
|
562
|
+
await time_out_assert(20, cat_wallet_0.get_confirmed_balance, 20)
|
|
563
|
+
await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 20)
|
|
564
|
+
|
|
565
|
+
await time_out_assert(30, cat_wallet_1.get_confirmed_balance, 60)
|
|
566
|
+
await time_out_assert(30, cat_wallet_1.get_unconfirmed_balance, 60)
|
|
567
|
+
|
|
568
|
+
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 20)
|
|
569
|
+
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 20)
|
|
673
570
|
|
|
674
|
-
|
|
675
|
-
await wallet_1.wallet_state_manager.add_pending_transactions(tx_records)
|
|
571
|
+
cat_hash = await cat_wallet_0.get_new_inner_hash()
|
|
676
572
|
|
|
677
|
-
|
|
678
|
-
|
|
573
|
+
tx_records = await cat_wallet_1.generate_signed_transaction([uint64(15)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
574
|
+
tx_records = await wallet_1.wallet_state_manager.add_pending_transactions(tx_records)
|
|
679
575
|
|
|
680
|
-
|
|
576
|
+
tx_records_2 = await cat_wallet_2.generate_signed_transaction([uint64(20)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
577
|
+
tx_records_2 = await wallet_2.wallet_state_manager.add_pending_transactions(tx_records_2)
|
|
681
578
|
|
|
682
|
-
|
|
683
|
-
await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 55)
|
|
579
|
+
await full_node_api.process_transaction_records(records=[*tx_records, *tx_records_2])
|
|
684
580
|
|
|
685
|
-
|
|
686
|
-
|
|
581
|
+
await time_out_assert(20, cat_wallet_0.get_confirmed_balance, 55)
|
|
582
|
+
await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 55)
|
|
687
583
|
|
|
688
|
-
|
|
689
|
-
|
|
584
|
+
await time_out_assert(30, cat_wallet_1.get_confirmed_balance, 45)
|
|
585
|
+
await time_out_assert(30, cat_wallet_1.get_unconfirmed_balance, 45)
|
|
690
586
|
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
|
|
587
|
+
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 0)
|
|
588
|
+
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 0)
|
|
589
|
+
|
|
590
|
+
txs = await wallet_1.wallet_state_manager.tx_store.get_transactions_between(cat_wallet_1.id(), 0, 100000)
|
|
591
|
+
# Test with Memo
|
|
592
|
+
tx_records_3 = await cat_wallet_1.generate_signed_transaction(
|
|
593
|
+
[uint64(30)], [cat_hash], DEFAULT_TX_CONFIG, memos=[[b"Markus Walburg"]]
|
|
594
|
+
)
|
|
595
|
+
with pytest.raises(ValueError):
|
|
596
|
+
await cat_wallet_1.generate_signed_transaction(
|
|
597
|
+
[uint64(30)], [cat_hash], DEFAULT_TX_CONFIG, memos=[[b"too"], [b"many"], [b"memos"]]
|
|
696
598
|
)
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
599
|
+
|
|
600
|
+
tx_records_3 = await wallet_1.wallet_state_manager.add_pending_transactions(tx_records_3)
|
|
601
|
+
await time_out_assert(15, full_node_api.txs_in_mempool, True, tx_records_3)
|
|
602
|
+
txs = await wallet_1.wallet_state_manager.tx_store.get_transactions_between(cat_wallet_1.id(), 0, 100000)
|
|
603
|
+
for tx in txs:
|
|
604
|
+
if tx.amount == 30:
|
|
605
|
+
memos = tx.get_memos()
|
|
606
|
+
assert len(memos) == 2 # One for tx, one for change
|
|
607
|
+
assert b"Markus Walburg" in [v for v_list in memos.values() for v in v_list]
|
|
608
|
+
assert tx.spend_bundle is not None
|
|
609
|
+
assert list(memos.keys())[0] in [a.name() for a in tx.spend_bundle.additions()]
|
|
610
|
+
|
|
611
|
+
|
|
612
|
+
@pytest.mark.limit_consensus_modes(allowed=[ConsensusMode.PLAIN, ConsensusMode.HARD_FORK_2_0], reason="save time")
|
|
613
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
614
|
+
@pytest.mark.anyio
|
|
615
|
+
async def test_cat_max_amount_send(
|
|
616
|
+
self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool
|
|
617
|
+
) -> None:
|
|
618
|
+
num_blocks = 3
|
|
619
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
620
|
+
full_node_api = full_nodes[0]
|
|
621
|
+
full_node_server = full_node_api.server
|
|
622
|
+
wallet_node, server_2 = wallets[0]
|
|
623
|
+
wallet_node_2, server_3 = wallets[1]
|
|
624
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
625
|
+
|
|
626
|
+
ph = await wallet.get_new_puzzlehash()
|
|
627
|
+
if trusted:
|
|
628
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
629
|
+
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
630
|
+
else:
|
|
631
|
+
wallet_node.config["trusted_peers"] = {}
|
|
632
|
+
wallet_node_2.config["trusted_peers"] = {}
|
|
633
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
634
|
+
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
635
|
+
|
|
636
|
+
for _ in range(num_blocks):
|
|
637
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
638
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
639
|
+
|
|
640
|
+
funds = sum(
|
|
641
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
642
|
+
)
|
|
643
|
+
|
|
644
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
645
|
+
|
|
646
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
743
|
-
cat_wallet,
|
|
744
|
-
wallet_node.wallet_state_manager,
|
|
745
|
-
wallet,
|
|
746
|
-
{"identifier": "genesis_by_id"},
|
|
747
|
-
uint64(100000),
|
|
748
|
-
DEFAULT_TX_CONFIG,
|
|
647
|
++
cat_wallet, tx_records = await CATWallet.create_new_cat_wallet(
|
|
648
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100000), DEFAULT_TX_CONFIG
|
|
749
649
|
)
|
|
750
|
-
|
|
650
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
751
651
|
|
|
752
|
-
|
|
652
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100000)
|
|
653
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100000)
|
|
753
654
|
|
|
754
|
-
|
|
755
|
-
cat_wallet, _ = await CATWallet.create_new_cat_wallet(
|
|
756
|
-
wallet_node.wallet_state_manager,
|
|
757
|
-
wallet,
|
|
758
|
-
{"identifier": "genesis_by_id"},
|
|
759
|
-
uint64(100000),
|
|
760
|
-
DEFAULT_TX_CONFIG,
|
|
761
|
-
)
|
|
762
|
-
tx_records: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
|
|
763
|
-
await full_node_api.process_transaction_records(records=tx_records)
|
|
655
|
+
assert cat_wallet.cat_info.limitations_program_hash is not None
|
|
764
656
|
|
|
765
|
-
|
|
766
|
-
|
|
657
|
+
cat_2 = await cat_wallet.get_new_inner_puzzle()
|
|
658
|
+
cat_2_hash = cat_2.get_tree_hash()
|
|
659
|
+
amounts = []
|
|
660
|
+
puzzle_hashes = []
|
|
661
|
+
for i in range(1, 50):
|
|
662
|
+
amounts.append(uint64(i))
|
|
663
|
+
puzzle_hashes.append(cat_2_hash)
|
|
664
|
+
spent_coint = (await cat_wallet.get_cat_spendable_coins())[0].coin
|
|
665
|
+
tx_records = await cat_wallet.generate_signed_transaction(
|
|
666
|
+
amounts, puzzle_hashes, DEFAULT_TX_CONFIG, coins={spent_coint}
|
|
667
|
+
)
|
|
668
|
+
tx_records = await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
669
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
767
670
|
|
|
768
|
-
|
|
671
|
+
await asyncio.sleep(2)
|
|
769
672
|
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
673
|
+
async def check_all_there() -> bool:
|
|
674
|
+
spendable = await cat_wallet.get_cat_spendable_coins()
|
|
675
|
+
spendable_name_set = set()
|
|
676
|
+
for record in spendable:
|
|
677
|
+
spendable_name_set.add(record.coin.name())
|
|
678
|
+
puzzle_hash = construct_cat_puzzle(CAT_MOD, cat_wallet.cat_info.limitations_program_hash, cat_2).get_tree_hash()
|
|
774
679
|
for i in range(1, 50):
|
|
775
|
-
coin = Coin(spent_coint.name(), puzzle_hash, i)
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
spendable_name_set = set()
|
|
790
|
-
for record in spendable:
|
|
791
|
-
spendable_name_set.add(record.coin.name())
|
|
792
|
-
puzzle_hash = construct_cat_puzzle(
|
|
793
|
-
CAT_MOD, cat_wallet.cat_info.limitations_program_hash, cat_2
|
|
794
|
-
).get_tree_hash()
|
|
795
|
-
for i in range(1, 50):
|
|
796
|
-
coin = Coin(spent_coint.name(), puzzle_hash, i)
|
|
797
|
-
if coin.name() not in spendable_name_set:
|
|
798
|
-
return False
|
|
799
|
-
return True
|
|
800
|
-
|
|
801
|
-
await time_out_assert(20, check_all_there, True)
|
|
802
|
-
await asyncio.sleep(5)
|
|
803
|
-
max_sent_amount = await cat_wallet.get_max_send_amount()
|
|
804
|
-
|
|
805
|
-
# 1) Generate transaction that is under the limit
|
|
806
|
-
[transaction_record] = await cat_wallet.generate_signed_transaction(
|
|
807
|
-
[max_sent_amount - 1],
|
|
808
|
-
[ph],
|
|
809
|
-
DEFAULT_TX_CONFIG,
|
|
810
|
-
)
|
|
680
|
++
coin = Coin(spent_coint.name(), puzzle_hash, uint64(i))
|
|
681
|
+
if coin.name() not in spendable_name_set:
|
|
682
|
+
return False
|
|
683
|
+
return True
|
|
684
|
+
|
|
685
|
+
await time_out_assert(20, check_all_there, True)
|
|
686
|
+
await asyncio.sleep(5)
|
|
687
|
+
max_sent_amount = await cat_wallet.get_max_send_amount()
|
|
688
|
+
|
|
689
|
+
# 1) Generate transaction that is under the limit
|
|
690
|
+
[transaction_record] = await cat_wallet.generate_signed_transaction(
|
|
691
|
+
[uint64(max_sent_amount - 1)], [ph], DEFAULT_TX_CONFIG
|
|
692
|
+
)
|
|
693
|
+
assert transaction_record.amount == uint64(max_sent_amount - 1)
|
|
811
694
|
|
|
812
|
-
|
|
695
|
+
# 2) Generate transaction that is equal to limit
|
|
696
|
+
[transaction_record] = await cat_wallet.generate_signed_transaction(
|
|
697
|
+
[uint64(max_sent_amount)], [ph], DEFAULT_TX_CONFIG
|
|
698
|
+
)
|
|
699
|
+
assert transaction_record.amount == uint64(max_sent_amount)
|
|
813
700
|
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
[ph],
|
|
818
|
-
DEFAULT_TX_CONFIG,
|
|
819
|
-
)
|
|
701
|
+
# 3) Generate transaction that is greater than limit
|
|
702
|
+
with pytest.raises(ValueError):
|
|
703
|
+
await cat_wallet.generate_signed_transaction([uint64(max_sent_amount + 1)], [ph], DEFAULT_TX_CONFIG)
|
|
820
704
|
|
|
821
|
-
assert transaction_record.amount == uint64(max_sent_amount)
|
|
822
|
-
|
|
823
|
-
# 3) Generate transaction that is greater than limit
|
|
824
|
-
with pytest.raises(ValueError):
|
|
825
|
-
await cat_wallet.generate_signed_transaction(
|
|
826
|
-
[max_sent_amount + 1],
|
|
827
|
-
[ph],
|
|
828
|
-
DEFAULT_TX_CONFIG,
|
|
829
|
-
)
|
|
830
|
-
|
|
831
|
-
@pytest.mark.limit_consensus_modes(allowed=[ConsensusMode.PLAIN, ConsensusMode.HARD_FORK_2_0], reason="save time")
|
|
832
|
-
@pytest.mark.parametrize("trusted", [True, False])
|
|
833
|
-
@pytest.mark.parametrize("autodiscovery", [True, False])
|
|
834
|
-
@pytest.mark.anyio
|
|
835
|
-
async def test_cat_hint(self, self_hostname, two_wallet_nodes, trusted, autodiscovery):
|
|
836
|
-
num_blocks = 3
|
|
837
|
-
full_nodes, wallets, _ = two_wallet_nodes
|
|
838
|
-
full_node_api = full_nodes[0]
|
|
839
|
-
full_node_server = full_node_api.server
|
|
840
|
-
wallet_node, server_2 = wallets[0]
|
|
841
|
-
wallet_node_2, server_3 = wallets[1]
|
|
842
|
-
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
843
|
-
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
844
|
-
|
|
845
|
-
ph = await wallet.get_new_puzzlehash()
|
|
846
|
-
if trusted:
|
|
847
|
-
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
848
|
-
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
849
|
-
else:
|
|
850
|
-
wallet_node.config["trusted_peers"] = {}
|
|
851
|
-
wallet_node_2.config["trusted_peers"] = {}
|
|
852
|
-
wallet_node.config["automatically_add_unknown_cats"] = autodiscovery
|
|
853
|
-
wallet_node_2.config["automatically_add_unknown_cats"] = autodiscovery
|
|
854
|
-
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
855
|
-
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
856
|
-
|
|
857
|
-
for i in range(0, num_blocks):
|
|
858
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
859
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
|
|
860
|
-
|
|
861
|
-
funds = sum(
|
|
862
|
-
[
|
|
863
|
-
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
|
|
864
|
-
for i in range(1, num_blocks + 1)
|
|
865
|
-
]
|
|
866
|
-
)
|
|
867
705
|
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
706
|
+
@pytest.mark.limit_consensus_modes(allowed=[ConsensusMode.PLAIN, ConsensusMode.HARD_FORK_2_0], reason="save time")
|
|
707
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
708
|
+
@pytest.mark.parametrize("autodiscovery", [True, False])
|
|
709
|
+
@pytest.mark.anyio
|
|
710
|
+
async def test_cat_hint(
|
|
711
|
+
self_hostname: str, two_wallet_nodes: OldSimulatorsAndWallets, trusted: bool, autodiscovery: bool
|
|
712
|
+
) -> None:
|
|
713
|
+
num_blocks = 3
|
|
714
|
+
full_nodes, wallets, _ = two_wallet_nodes
|
|
715
|
+
full_node_api = full_nodes[0]
|
|
716
|
+
full_node_server = full_node_api.server
|
|
717
|
+
wallet_node, server_2 = wallets[0]
|
|
718
|
+
wallet_node_2, server_3 = wallets[1]
|
|
719
|
+
wallet = wallet_node.wallet_state_manager.main_wallet
|
|
720
|
+
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
|
|
721
|
+
|
|
722
|
+
ph = await wallet.get_new_puzzlehash()
|
|
723
|
+
if trusted:
|
|
724
|
+
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
725
|
+
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
726
|
+
else:
|
|
727
|
+
wallet_node.config["trusted_peers"] = {}
|
|
728
|
+
wallet_node_2.config["trusted_peers"] = {}
|
|
729
|
+
wallet_node.config["automatically_add_unknown_cats"] = autodiscovery
|
|
730
|
+
wallet_node_2.config["automatically_add_unknown_cats"] = autodiscovery
|
|
731
|
+
await server_2.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
732
|
+
await server_3.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
733
|
+
|
|
734
|
+
for _ in range(num_blocks):
|
|
735
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
736
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
|
|
737
|
+
|
|
738
|
+
funds = sum(
|
|
739
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
740
|
+
)
|
|
741
|
+
|
|
742
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
743
|
+
|
|
744
|
+
async with wallet_node.wallet_state_manager.lock:
|
|
888
|
-
cat_wallet,
|
|
889
|
-
wallet_node.wallet_state_manager,
|
|
890
|
-
wallet,
|
|
891
|
-
{"identifier": "genesis_by_id"},
|
|
892
|
-
uint64(100),
|
|
893
|
-
DEFAULT_TX_CONFIG,
|
|
745
|
++
cat_wallet, tx_records = await CATWallet.create_new_cat_wallet(
|
|
746
|
++
wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100), DEFAULT_TX_CONFIG
|
|
894
747
|
)
|
|
895
|
-
|
|
748
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
896
749
|
|
|
897
|
-
|
|
750
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
|
|
751
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
|
|
752
|
+
assert cat_wallet.cat_info.limitations_program_hash is not None
|
|
898
753
|
|
|
899
|
-
|
|
754
|
+
cat_2_hash = await wallet2.get_new_puzzlehash()
|
|
755
|
+
tx_records = await cat_wallet.generate_signed_transaction(
|
|
756
|
+
[uint64(60)], [cat_2_hash], DEFAULT_TX_CONFIG, memos=[[cat_2_hash]]
|
|
757
|
+
)
|
|
900
758
|
|
|
901
|
-
|
|
902
|
-
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
|
|
759
|
+
tx_records = await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
903
760
|
|
|
904
|
-
|
|
905
|
-
return len(node.wallet_state_manager.wallets.keys())
|
|
761
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
906
762
|
|
|
907
|
-
|
|
908
|
-
|
|
909
|
-
await time_out_assert(20, check_wallets, 2, wallet_node_2)
|
|
910
|
-
else:
|
|
911
|
-
# Autodiscovery disabled: test that no wallet was created
|
|
912
|
-
await time_out_assert(20, check_wallets, 1, wallet_node_2)
|
|
763
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
|
|
764
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
|
|
913
765
|
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
766
|
+
if autodiscovery:
|
|
767
|
+
# Autodiscovery enabled: test that wallet was created at this point
|
|
768
|
+
await time_out_assert(20, check_wallets, 2, wallet_node_2)
|
|
769
|
+
else:
|
|
770
|
+
# Autodiscovery disabled: test that no wallet was created
|
|
771
|
+
await time_out_assert(20, check_wallets, 1, wallet_node_2)
|
|
772
|
+
|
|
773
|
+
# Then we update the wallet's default CATs
|
|
774
|
+
wallet_node_2.wallet_state_manager.default_cats = {
|
|
775
|
+
cat_wallet.cat_info.limitations_program_hash.hex(): {
|
|
776
|
+
"asset_id": cat_wallet.cat_info.limitations_program_hash.hex(),
|
|
777
|
+
"name": "Test",
|
|
778
|
+
"symbol": "TST",
|
|
921
779
|
}
|
|
780
|
+
}
|
|
922
781
|
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
782
|
+
# Then we send another transaction
|
|
783
|
+
tx_records = await cat_wallet.generate_signed_transaction(
|
|
784
|
+
[uint64(10)], [cat_2_hash], DEFAULT_TX_CONFIG, memos=[[cat_2_hash]]
|
|
785
|
+
)
|
|
927
786
|
|
|
928
|
-
|
|
787
|
+
tx_records = await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
929
788
|
|
|
930
|
-
|
|
789
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
931
790
|
|
|
932
|
-
|
|
933
|
-
|
|
791
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 30)
|
|
792
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 30)
|
|
934
793
|
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
794
|
+
# Now we check that another wallet WAS created, even if autodiscovery was disabled
|
|
795
|
+
await time_out_assert(20, check_wallets, 2, wallet_node_2)
|
|
796
|
+
cat_wallet_2 = wallet_node_2.wallet_state_manager.wallets[uint32(2)]
|
|
797
|
+
assert isinstance(cat_wallet_2, CATWallet)
|
|
798
|
+
|
|
799
|
+
# Previous balance + balance that triggered creation in case of disabled autodiscovery
|
|
800
|
+
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 70)
|
|
801
|
+
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 70)
|
|
802
|
+
|
|
803
|
+
cat_hash = await cat_wallet.get_new_inner_hash()
|
|
804
|
+
tx_records = await cat_wallet_2.generate_signed_transaction([uint64(5)], [cat_hash], DEFAULT_TX_CONFIG)
|
|
805
|
+
tx_records = await wallet2.wallet_state_manager.add_pending_transactions(tx_records)
|
|
938
806
|
|
|
939
|
-
|
|
940
|
-
await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 70)
|
|
941
|
-
await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 70)
|
|
807
|
+
await full_node_api.process_transaction_records(records=tx_records)
|
|
942
808
|
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
await wallet.wallet_state_manager.add_pending_transactions(tx_records)
|
|
809
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, 35)
|
|
810
|
+
await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 35)
|
|
946
811
|
|
|
947
|
-
await full_node_api.process_transaction_records(records=tx_records)
|
|
948
812
|
|
|
949
|
-
|
|
950
|
-
|
|
813
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
814
|
+
@pytest.mark.anyio
|
|
815
|
+
async def test_cat_change_detection(
|
|
816
|
+
self_hostname: str, one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, trusted: bool
|
|
817
|
+
) -> None:
|
|
818
|
+
num_blocks = 1
|
|
819
|
+
full_nodes, wallets, bt = one_wallet_and_one_simulator_services
|
|
820
|
+
full_node_api = full_nodes[0]._api
|
|
821
|
+
full_node_server = full_node_api.full_node.server
|
|
822
|
+
wallet_service_0 = wallets[0]
|
|
823
|
+
wallet_node_0 = wallet_service_0._node
|
|
824
|
+
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
|
|
825
|
+
|
|
826
|
+
assert wallet_service_0.rpc_server is not None
|
|
827
|
+
|
|
828
|
+
client_0 = await WalletRpcClient.create(
|
|
829
|
+
bt.config["self_hostname"],
|
|
830
|
+
wallet_service_0.rpc_server.listen_port,
|
|
831
|
+
wallet_service_0.root_path,
|
|
832
|
+
wallet_service_0.config,
|
|
833
|
+
)
|
|
834
|
+
wallet_node_0.config["automatically_add_unknown_cats"] = True
|
|
951
835
|
|
|
952
|
-
|
|
953
|
-
"
|
|
954
|
-
|
|
836
|
+
if trusted:
|
|
837
|
+
wallet_node_0.config["trusted_peers"] = {
|
|
838
|
+
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
|
|
839
|
+
}
|
|
840
|
+
else:
|
|
841
|
+
wallet_node_0.config["trusted_peers"] = {}
|
|
842
|
+
|
|
843
|
+
await wallet_node_0.server.start_client(PeerInfo(self_hostname, uint16(full_node_server.get_port())), None)
|
|
844
|
+
await full_node_api.farm_blocks_to_wallet(count=num_blocks, wallet=wallet_0)
|
|
845
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node_0, timeout=20)
|
|
846
|
+
|
|
847
|
+
# Mint CAT to ourselves, immediately spend it to an unhinted puzzle hash that we have manually added to the DB
|
|
848
|
+
# We should pick up this coin as balance even though it is unhinted because it is "change"
|
|
849
|
+
intermediate_sk_un = master_sk_to_wallet_sk_unhardened_intermediate(wallet_node_0.wallet_state_manager.private_key)
|
|
850
|
+
pubkey_unhardened = _derive_path_unhardened(intermediate_sk_un, [100000000]).get_g1()
|
|
851
|
+
inner_puzhash = puzzle_hash_for_pk(pubkey_unhardened)
|
|
852
|
+
puzzlehash_unhardened = construct_cat_puzzle(
|
|
853
|
+
CAT_MOD, Program.to(None).get_tree_hash(), inner_puzhash
|
|
854
|
+
).get_tree_hash_precalc(inner_puzhash)
|
|
855
|
+
change_derivation = DerivationRecord(
|
|
856
|
+
uint32(0), puzzlehash_unhardened, pubkey_unhardened, WalletType.CAT, uint32(2), False
|
|
955
857
|
)
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
await wallet_node_0.wallet_state_manager.puzzle_store.add_derivation_paths([change_derivation])
|
|
1011
|
-
our_puzzle: Program = await wallet_0.get_new_puzzle()
|
|
1012
|
-
cat_puzzle: Program = construct_cat_puzzle(
|
|
1013
|
-
CAT_MOD,
|
|
1014
|
-
Program.to(None).get_tree_hash(),
|
|
1015
|
-
Program.to(1),
|
|
1016
|
-
)
|
|
1017
|
-
addr = encode_puzzle_hash(cat_puzzle.get_tree_hash(), "txch")
|
|
1018
|
-
cat_amount_0 = uint64(100)
|
|
1019
|
-
cat_amount_1 = uint64(5)
|
|
1020
|
-
|
|
1021
|
-
tx = await client_0.send_transaction(1, cat_amount_0, addr, DEFAULT_TX_CONFIG)
|
|
1022
|
-
spend_bundle = tx.spend_bundle
|
|
1023
|
-
assert spend_bundle is not None
|
|
1024
|
-
|
|
1025
|
-
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
|
|
1026
|
-
await full_node_api.farm_blocks_to_wallet(count=num_blocks, wallet=wallet_0)
|
|
1027
|
-
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node_0, timeout=20)
|
|
1028
|
-
|
|
1029
|
-
# Do the eve spend back to our wallet and add the CR layer
|
|
1030
|
-
cat_coin = next(c for c in spend_bundle.additions() if c.amount == cat_amount_0)
|
|
1031
|
-
next_coin = Coin(
|
|
1032
|
-
cat_coin.name(),
|
|
1033
|
-
construct_cat_puzzle(
|
|
1034
|
-
CAT_MOD,
|
|
1035
|
-
Program.to(None).get_tree_hash(),
|
|
1036
|
-
our_puzzle,
|
|
1037
|
-
).get_tree_hash(),
|
|
1038
|
-
cat_amount_0,
|
|
1039
|
-
)
|
|
1040
|
-
eve_spend = await wallet_node_0.wallet_state_manager.sign_transaction(
|
|
1041
|
-
[
|
|
1042
|
-
CoinSpend(
|
|
1043
|
-
cat_coin,
|
|
1044
|
-
cat_puzzle,
|
|
1045
|
-
Program.to(
|
|
858
|
+
# Insert the derivation record before the wallet exists so that it is not subscribed to
|
|
859
|
+
await wallet_node_0.wallet_state_manager.puzzle_store.add_derivation_paths([change_derivation])
|
|
860
|
+
our_puzzle = await wallet_0.get_new_puzzle()
|
|
861
|
+
cat_puzzle = construct_cat_puzzle(
|
|
862
|
+
CAT_MOD,
|
|
863
|
+
Program.to(None).get_tree_hash(),
|
|
864
|
+
Program.to(1),
|
|
865
|
+
)
|
|
866
|
+
addr = encode_puzzle_hash(cat_puzzle.get_tree_hash(), "txch")
|
|
867
|
+
cat_amount_0 = uint64(100)
|
|
868
|
+
cat_amount_1 = uint64(5)
|
|
869
|
+
|
|
870
|
+
tx = await client_0.send_transaction(1, cat_amount_0, addr, DEFAULT_TX_CONFIG)
|
|
871
|
+
spend_bundle = tx.spend_bundle
|
|
872
|
+
assert spend_bundle is not None
|
|
873
|
+
|
|
874
|
+
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
|
|
875
|
+
await full_node_api.farm_blocks_to_wallet(count=num_blocks, wallet=wallet_0)
|
|
876
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node_0, timeout=20)
|
|
877
|
+
|
|
878
|
+
# Do the eve spend back to our wallet and add the CR layer
|
|
879
|
+
cat_coin = next(c for c in spend_bundle.additions() if c.amount == cat_amount_0)
|
|
880
|
+
next_coin = Coin(
|
|
881
|
+
cat_coin.name(),
|
|
882
|
+
construct_cat_puzzle(CAT_MOD, Program.to(None).get_tree_hash(), our_puzzle).get_tree_hash(),
|
|
883
|
+
cat_amount_0,
|
|
884
|
+
)
|
|
885
|
+
eve_spend, _ = await wallet_node_0.wallet_state_manager.sign_bundle(
|
|
886
|
+
[
|
|
887
|
+
make_spend(
|
|
888
|
+
cat_coin,
|
|
889
|
+
cat_puzzle,
|
|
890
|
+
Program.to(
|
|
891
|
+
[
|
|
892
|
+
Program.to(
|
|
893
|
+
[
|
|
894
|
+
[51, our_puzzle.get_tree_hash(), cat_amount_0, [our_puzzle.get_tree_hash()]],
|
|
895
|
+
[51, None, -113, None, None],
|
|
896
|
+
]
|
|
897
|
+
),
|
|
898
|
+
None,
|
|
899
|
+
cat_coin.name(),
|
|
900
|
+
coin_as_list(cat_coin),
|
|
901
|
+
[cat_coin.parent_coin_info, Program.to(1).get_tree_hash(), cat_coin.amount],
|
|
902
|
+
0,
|
|
903
|
+
0,
|
|
904
|
+
]
|
|
905
|
+
),
|
|
906
|
+
),
|
|
907
|
+
make_spend(
|
|
908
|
+
next_coin,
|
|
909
|
+
construct_cat_puzzle(CAT_MOD, Program.to(None).get_tree_hash(), our_puzzle),
|
|
910
|
+
Program.to(
|
|
911
|
+
[
|
|
1046
912
|
[
|
|
1047
|
-
|
|
913
|
+
None,
|
|
914
|
+
(
|
|
915
|
+
1,
|
|
1048
916
|
[
|
|
1049
|
-
[
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
cat_amount_0,
|
|
1053
|
-
[our_puzzle.get_tree_hash()],
|
|
1054
|
-
],
|
|
1055
|
-
[51, None, -113, None, None],
|
|
1056
|
-
]
|
|
917
|
+
[51, inner_puzhash, cat_amount_1],
|
|
918
|
+
[51, bytes32([0] * 32), cat_amount_0 - cat_amount_1],
|
|
919
|
+
],
|
|
1057
920
|
),
|
|
1058
921
|
None,
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
construct_cat_puzzle(
|
|
1070
|
-
CAT_MOD,
|
|
1071
|
-
Program.to(None).get_tree_hash(),
|
|
1072
|
-
our_puzzle,
|
|
1073
|
-
),
|
|
1074
|
-
Program.to(
|
|
1075
|
-
[
|
|
1076
|
-
[
|
|
1077
|
-
None,
|
|
1078
|
-
(
|
|
1079
|
-
1,
|
|
1080
|
-
[
|
|
1081
|
-
[51, inner_puzhash, cat_amount_1],
|
|
1082
|
-
[51, bytes32([0] * 32), cat_amount_0 - cat_amount_1],
|
|
1083
|
-
],
|
|
1084
|
-
),
|
|
1085
|
-
None,
|
|
1086
|
-
],
|
|
1087
|
-
LineageProof(
|
|
1088
|
-
cat_coin.parent_coin_info, Program.to(1).get_tree_hash(), cat_amount_0
|
|
1089
|
-
).to_program(),
|
|
1090
|
-
next_coin.name(),
|
|
1091
|
-
coin_as_list(next_coin),
|
|
1092
|
-
[next_coin.parent_coin_info, our_puzzle.get_tree_hash(), next_coin.amount],
|
|
1093
|
-
0,
|
|
1094
|
-
0,
|
|
1095
|
-
]
|
|
1096
|
-
),
|
|
922
|
+
],
|
|
923
|
+
LineageProof(
|
|
924
|
+
cat_coin.parent_coin_info, Program.to(1).get_tree_hash(), cat_amount_0
|
|
925
|
+
).to_program(),
|
|
926
|
+
next_coin.name(),
|
|
927
|
+
coin_as_list(next_coin),
|
|
928
|
+
[next_coin.parent_coin_info, our_puzzle.get_tree_hash(), next_coin.amount],
|
|
929
|
+
0,
|
|
930
|
+
0,
|
|
931
|
+
]
|
|
1097
932
|
),
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
|
|
1105
|
-
async def check_wallets(node):
|
|
1106
|
-
return len(node.wallet_state_manager.wallets.keys())
|
|
933
|
+
),
|
|
934
|
+
],
|
|
935
|
+
)
|
|
936
|
+
await client_0.push_tx(eve_spend)
|
|
937
|
+
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, eve_spend.name())
|
|
938
|
+
await full_node_api.farm_blocks_to_wallet(count=num_blocks, wallet=wallet_0)
|
|
939
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node_0, timeout=20)
|
|
1107
940
|
|
|
1108
|
-
|
|
1109
|
-
|
|
1110
|
-
|
|
1111
|
-
|
|
941
|
+
await time_out_assert(20, check_wallets, 2, wallet_node_0)
|
|
942
|
+
cat_wallet = wallet_node_0.wallet_state_manager.wallets[uint32(2)]
|
|
943
|
+
await time_out_assert(20, cat_wallet.get_confirmed_balance, cat_amount_1)
|
|
944
|
+
assert not full_node_api.full_node.subscriptions.has_puzzle_subscription(puzzlehash_unhardened)
|
|
1112
945
|
|
|
1113
946
|
|
|
1114
947
|
@pytest.mark.anyio
|
|
@@@ -67,8 -66,8 +67,8 @@@ async def generate_coins
|
|
|
67
67
|
CAT_MOD,
|
|
68
68
|
[
|
|
69
69
|
SpendableCAT(
|
|
70
|
-
Coin(parent_coin.name(), cat_puzzle_hash, amount),
|
|
71
|
-
|
|
72
|
-
tail.get_tree_hash(),
|
|
70
|
++
Coin(parent_coin.name(), cat_puzzle_hash, uint64(amount)),
|
|
71
|
+
tail_hash,
|
|
73
72
|
acs,
|
|
74
73
|
Program.to([[51, acs_ph, amount], [51, 0, -113, tail, []]]),
|
|
75
74
|
)
|
|
@@@ -1552,439 -1511,457 +1552,442 @@@ async def test_cat_trades
|
|
|
1552
1552
|
assert result.error is None
|
|
1553
1553
|
|
|
1554
1554
|
|
|
1555
|
-
@pytest.mark.parametrize(
|
|
1556
|
-
|
|
1557
|
-
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1555
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
1556
|
+
@pytest.mark.anyio
|
|
1557
|
+
async def test_trade_cancellation(wallets_prefarm):
|
|
1558
|
+
(
|
|
1559
|
+
[wallet_node_maker, maker_funds],
|
|
1560
|
+
[wallet_node_taker, taker_funds],
|
|
1561
|
+
full_node,
|
|
1562
|
+
) = wallets_prefarm
|
|
1563
|
+
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1564
|
+
wallet_taker = wallet_node_taker.wallet_state_manager.main_wallet
|
|
1565
|
+
|
|
1566
|
+
xch_to_cat_amount = uint64(100)
|
|
1567
|
+
|
|
1568
|
+
async with wallet_node_maker.wallet_state_manager.lock:
|
|
1569
|
+
cat_wallet_maker, tx_records = await CATWallet.create_new_cat_wallet(
|
|
1570
|
+
wallet_node_maker.wallet_state_manager,
|
|
1571
|
+
wallet_maker,
|
|
1572
|
+
{"identifier": "genesis_by_id"},
|
|
1573
|
+
xch_to_cat_amount,
|
|
1574
|
+
DEFAULT_TX_CONFIG,
|
|
1575
|
+
)
|
|
1569
1576
|
|
|
1570
|
-
|
|
1577
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1571
1578
|
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
{"identifier": "genesis_by_id"},
|
|
1577
|
-
xch_to_cat_amount,
|
|
1578
|
-
DEFAULT_TX_CONFIG,
|
|
1579
|
-
)
|
|
1579
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1580
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1581
|
+
maker_funds -= xch_to_cat_amount
|
|
1582
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1580
1583
|
|
|
1581
|
-
|
|
1584
|
+
cat_for_chia = {
|
|
1585
|
+
wallet_maker.id(): 1,
|
|
1586
|
+
cat_wallet_maker.id(): -2,
|
|
1587
|
+
}
|
|
1582
1588
|
|
|
1583
|
-
|
|
1589
|
+
chia_for_cat = {
|
|
1590
|
+
wallet_maker.id(): -3,
|
|
1591
|
+
cat_wallet_maker.id(): 4,
|
|
1592
|
+
}
|
|
1584
1593
|
|
|
1585
|
-
|
|
1586
|
-
|
|
1587
|
-
maker_funds -= xch_to_cat_amount
|
|
1588
|
-
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1594
|
+
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
|
|
1595
|
+
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
|
|
1589
1596
|
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1597
|
+
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(cat_for_chia, DEFAULT_TX_CONFIG)
|
|
1598
|
+
assert error is None
|
|
1599
|
+
assert success is True
|
|
1600
|
+
assert trade_make is not None
|
|
1594
1601
|
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
|
|
1598
|
-
|
|
1602
|
+
# Cancelling the trade and trying an ID that doesn't exist just in case
|
|
1603
|
+
await trade_manager_maker.cancel_pending_offers(
|
|
1604
|
+
[trade_make.trade_id, bytes32([0] * 32)], DEFAULT_TX_CONFIG, secure=False
|
|
1605
|
+
)
|
|
1606
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
|
|
1607
|
+
|
|
1608
|
+
# Due to current mempool rules, trying to force a take out of the mempool with a cancel will not work.
|
|
1609
|
+
# Uncomment this when/if it does
|
|
1610
|
+
|
|
1611
|
+
# [maker_offer], signing_response = await wallet_node_maker.wallet_state_manager.sign_offers(
|
|
1612
|
+
# [Offer.from_bytes(trade_make.offer)]
|
|
1613
|
+
# )
|
|
1614
|
+
# trade_take, tx_records = await trade_manager_taker.respond_to_offer(
|
|
1615
|
+
# maker_offer,
|
|
1616
|
+
# )
|
|
1617
|
+
# tx_records = await wallet_taker.wallet_state_manager.add_pending_transactions(
|
|
1618
|
+
# tx_records,
|
|
1619
|
+
# additional_signing_responses=signing_response,
|
|
1620
|
+
# )
|
|
1621
|
+
# await time_out_assert(15, full_node.txs_in_mempool, True, tx_records)
|
|
1622
|
+
# assert trade_take is not None
|
|
1623
|
+
# assert tx_records is not None
|
|
1624
|
+
# await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CONFIRM, trade_manager_taker, trade_take)
|
|
1625
|
+
# await time_out_assert(
|
|
1626
|
+
# 15,
|
|
1627
|
+
# full_node.tx_id_in_mempool,
|
|
1628
|
+
# True,
|
|
1629
|
+
# Offer.from_bytes(trade_take.offer).to_valid_spend().name(),
|
|
1630
|
+
# )
|
|
1631
|
+
|
|
1632
|
+
fee = uint64(2_000_000_000_000)
|
|
1633
|
+
|
|
1634
|
+
txs = await trade_manager_maker.cancel_pending_offers(
|
|
1635
|
+
[trade_make.trade_id], DEFAULT_TX_CONFIG, fee=fee, secure=True
|
|
1636
|
+
)
|
|
1637
|
+
txs = await wallet_maker.wallet_state_manager.add_pending_transactions(txs)
|
|
1638
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
|
|
1639
|
+
await full_node.process_transaction_records(records=txs)
|
|
1640
|
+
|
|
1641
|
+
sum_of_outgoing = uint64(0)
|
|
1642
|
+
sum_of_incoming = uint64(0)
|
|
1643
|
+
for tx in txs:
|
|
1644
|
+
if tx.type == TransactionType.OUTGOING_TX.value:
|
|
1645
|
+
sum_of_outgoing = uint64(sum_of_outgoing + tx.amount)
|
|
1646
|
+
elif tx.type == TransactionType.INCOMING_TX.value:
|
|
1647
|
+
sum_of_incoming = uint64(sum_of_incoming + tx.amount)
|
|
1648
|
+
assert (sum_of_outgoing - sum_of_incoming) == 0
|
|
1649
|
+
|
|
1650
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
|
|
1651
|
+
# await time_out_assert(15, get_trade_and_status, TradeStatus.FAILED, trade_manager_taker, trade_take)
|
|
1652
|
+
|
|
1653
|
+
await time_out_assert(15, wallet_maker.get_pending_change_balance, 0)
|
|
1654
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds - fee)
|
|
1655
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1656
|
+
await time_out_assert(15, wallet_taker.get_confirmed_balance, taker_funds)
|
|
1599
1657
|
|
|
1600
|
-
|
|
1601
|
-
|
|
1658
|
+
peer = wallet_node_taker.get_full_node_peer()
|
|
1659
|
+
with pytest.raises(ValueError, match="This offer is no longer valid"):
|
|
1660
|
+
await trade_manager_taker.respond_to_offer(Offer.from_bytes(trade_make.offer), peer, DEFAULT_TX_CONFIG)
|
|
1602
1661
|
|
|
1603
|
-
|
|
1604
|
-
|
|
1605
|
-
|
|
1662
|
+
# Now we're going to create the other way around for test coverage sake
|
|
1663
|
+
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1664
|
+
assert error is None
|
|
1665
|
+
assert success is True
|
|
1666
|
+
assert trade_make is not None
|
|
1606
1667
|
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1668
|
+
# This take should fail since we have no CATs to fulfill it with
|
|
1669
|
+
with pytest.raises(
|
|
1670
|
+
ValueError,
|
|
1671
|
+
match=f"Do not have a wallet for asset ID: {cat_wallet_maker.get_asset_id()} to fulfill offer",
|
|
1672
|
+
):
|
|
1673
|
+
await trade_manager_taker.respond_to_offer(Offer.from_bytes(trade_make.offer), peer, DEFAULT_TX_CONFIG)
|
|
1611
1674
|
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1618
|
-
# Due to current mempool rules, trying to force a take out of the mempool with a cancel will not work.
|
|
1619
|
-
# Uncomment this when/if it does
|
|
1620
|
-
|
|
1621
|
-
# trade_take, tx_records = await trade_manager_taker.respond_to_offer(
|
|
1622
|
-
# Offer.from_bytes(trade_make.offer),
|
|
1623
|
-
# )
|
|
1624
|
-
# await wallet_taker.wallet_state_manager.add_pending_transactions(tx_records)
|
|
1625
|
-
# await time_out_assert(15, full_node.txs_in_mempool, True, tx_records)
|
|
1626
|
-
# assert trade_take is not None
|
|
1627
|
-
# assert tx_records is not None
|
|
1628
|
-
# await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CONFIRM, trade_manager_taker, trade_take)
|
|
1629
|
-
# await time_out_assert(
|
|
1630
|
-
# 15,
|
|
1631
|
-
# full_node.tx_id_in_mempool,
|
|
1632
|
-
# True,
|
|
1633
|
-
# Offer.from_bytes(trade_take.offer).to_valid_spend().name(),
|
|
1634
|
-
# )
|
|
1635
|
-
|
|
1636
|
-
fee = uint64(2_000_000_000_000)
|
|
1637
|
-
|
|
1638
|
-
txs = await trade_manager_maker.cancel_pending_offers(
|
|
1639
|
-
[trade_make.trade_id], DEFAULT_TX_CONFIG, fee=fee, secure=True
|
|
1640
|
-
)
|
|
1641
|
-
await wallet_taker.wallet_state_manager.add_pending_transactions(txs)
|
|
1642
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
|
|
1643
|
-
await full_node.process_transaction_records(records=txs)
|
|
1644
|
-
|
|
1645
|
-
sum_of_outgoing = uint64(0)
|
|
1646
|
-
sum_of_incoming = uint64(0)
|
|
1647
|
-
for tx in txs:
|
|
1648
|
-
if tx.type == TransactionType.OUTGOING_TX.value:
|
|
1649
|
-
sum_of_outgoing = uint64(sum_of_outgoing + tx.amount)
|
|
1650
|
-
elif tx.type == TransactionType.INCOMING_TX.value:
|
|
1651
|
-
sum_of_incoming = uint64(sum_of_incoming + tx.amount)
|
|
1652
|
-
assert (sum_of_outgoing - sum_of_incoming) == 0
|
|
1653
|
-
|
|
1654
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
|
|
1655
|
-
# await time_out_assert(15, get_trade_and_status, TradeStatus.FAILED, trade_manager_taker, trade_take)
|
|
1656
|
-
|
|
1657
|
-
await time_out_assert(15, wallet_maker.get_pending_change_balance, 0)
|
|
1658
|
-
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds - fee)
|
|
1659
|
-
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1660
|
-
await time_out_assert(15, wallet_taker.get_confirmed_balance, taker_funds)
|
|
1661
|
-
|
|
1662
|
-
peer = wallet_node_taker.get_full_node_peer()
|
|
1663
|
-
with pytest.raises(ValueError, match="This offer is no longer valid"):
|
|
1664
|
-
await trade_manager_taker.respond_to_offer(Offer.from_bytes(trade_make.offer), peer, DEFAULT_TX_CONFIG)
|
|
1665
|
-
|
|
1666
|
-
# Now we're going to create the other way around for test coverage sake
|
|
1667
|
-
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1668
|
-
assert error is None
|
|
1669
|
-
assert success is True
|
|
1670
|
-
assert trade_make is not None
|
|
1671
|
-
|
|
1672
|
-
# This take should fail since we have no CATs to fulfill it with
|
|
1673
|
-
with pytest.raises(
|
|
1674
|
-
ValueError,
|
|
1675
|
-
match=f"Do not have a wallet for asset ID: {cat_wallet_maker.get_asset_id()} to fulfill offer",
|
|
1676
|
-
):
|
|
1677
|
-
await trade_manager_taker.respond_to_offer(Offer.from_bytes(trade_make.offer), peer, DEFAULT_TX_CONFIG)
|
|
1678
|
-
|
|
1679
|
-
txs = await trade_manager_maker.cancel_pending_offers(
|
|
1680
|
-
[trade_make.trade_id], DEFAULT_TX_CONFIG, fee=uint64(0), secure=True
|
|
1681
|
-
)
|
|
1682
|
-
await wallet_taker.wallet_state_manager.add_pending_transactions(txs)
|
|
1683
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
|
|
1684
|
-
await full_node.process_transaction_records(records=txs)
|
|
1675
|
+
txs = await trade_manager_maker.cancel_pending_offers(
|
|
1676
|
+
[trade_make.trade_id], DEFAULT_TX_CONFIG, fee=uint64(0), secure=True
|
|
1677
|
+
)
|
|
1678
|
+
txs = await wallet_maker.wallet_state_manager.add_pending_transactions(txs)
|
|
1679
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
|
|
1680
|
+
await full_node.process_transaction_records(records=txs)
|
|
1685
1681
|
|
|
1686
|
-
|
|
1682
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
|
|
1687
1683
|
|
|
1688
|
-
@pytest.mark.anyio
|
|
1689
|
-
async def test_trade_cancellation_balance_check(self, wallets_prefarm):
|
|
1690
|
-
(
|
|
1691
|
-
[wallet_node_maker, maker_funds],
|
|
1692
|
-
[wallet_node_taker, taker_funds],
|
|
1693
|
-
full_node,
|
|
1694
|
-
) = wallets_prefarm
|
|
1695
|
-
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1696
1684
|
|
|
1697
|
-
|
|
1685
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
1686
|
+
@pytest.mark.anyio
|
|
1687
|
+
async def test_trade_cancellation_balance_check(wallets_prefarm):
|
|
1688
|
+
(
|
|
1689
|
+
[wallet_node_maker, maker_funds],
|
|
1690
|
+
[wallet_node_taker, taker_funds],
|
|
1691
|
+
full_node,
|
|
1692
|
+
) = wallets_prefarm
|
|
1693
|
+
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1694
|
+
|
|
1695
|
+
xch_to_cat_amount = uint64(100)
|
|
1696
|
+
|
|
1697
|
+
async with wallet_node_maker.wallet_state_manager.lock:
|
|
1698
|
+
cat_wallet_maker, tx_records = await CATWallet.create_new_cat_wallet(
|
|
1699
|
+
wallet_node_maker.wallet_state_manager,
|
|
1700
|
+
wallet_maker,
|
|
1701
|
+
{"identifier": "genesis_by_id"},
|
|
1702
|
+
xch_to_cat_amount,
|
|
1703
|
+
DEFAULT_TX_CONFIG,
|
|
1704
|
+
)
|
|
1698
1705
|
|
|
1699
|
-
|
|
1700
|
-
cat_wallet_maker, _ = await CATWallet.create_new_cat_wallet(
|
|
1701
|
-
wallet_node_maker.wallet_state_manager,
|
|
1702
|
-
wallet_maker,
|
|
1703
|
-
{"identifier": "genesis_by_id"},
|
|
1704
|
-
xch_to_cat_amount,
|
|
1705
|
-
DEFAULT_TX_CONFIG,
|
|
1706
|
-
)
|
|
1706
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1707
1707
|
|
|
1708
|
-
|
|
1708
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1709
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1710
|
+
maker_funds -= xch_to_cat_amount
|
|
1711
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1709
1712
|
|
|
1710
|
-
|
|
1713
|
+
chia_for_cat = {
|
|
1714
|
+
wallet_maker.id(): -(await wallet_maker.get_spendable_balance()),
|
|
1715
|
+
cat_wallet_maker.id(): 4,
|
|
1716
|
+
}
|
|
1711
1717
|
|
|
1712
|
-
|
|
1713
|
-
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1714
|
-
maker_funds -= xch_to_cat_amount
|
|
1715
|
-
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1718
|
+
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
|
|
1716
1719
|
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1720
|
+
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1721
|
+
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make)
|
|
1722
|
+
assert error is None
|
|
1723
|
+
assert success is True
|
|
1724
|
+
assert trade_make is not None
|
|
1725
|
+
txs = await trade_manager_maker.cancel_pending_offers(
|
|
1726
|
+
[trade_make.trade_id], DEFAULT_TX_CONFIG, fee=uint64(0), secure=True
|
|
1727
|
+
)
|
|
1728
|
+
txs = await trade_manager_maker.wallet_state_manager.add_pending_transactions(txs)
|
|
1729
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
|
|
1730
|
+
await full_node.process_transaction_records(records=txs)
|
|
1721
1731
|
|
|
1722
|
-
|
|
1732
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
|
|
1723
1733
|
|
|
1724
|
-
async def get_trade_and_status(trade_manager, trade) -> TradeStatus:
|
|
1725
|
-
trade_rec = await trade_manager.get_trade_by_id(trade.trade_id)
|
|
1726
|
-
return TradeStatus(trade_rec.status)
|
|
1727
1734
|
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1735
|
+
@pytest.mark.limit_consensus_modes(allowed=[ConsensusMode.PLAIN, ConsensusMode.HARD_FORK_2_0], reason="save time")
|
|
1736
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
1737
|
+
@pytest.mark.anyio
|
|
1738
|
+
async def test_trade_conflict(three_wallets_prefarm):
|
|
1739
|
+
(
|
|
1740
|
+
[wallet_node_maker, maker_funds],
|
|
1741
|
+
[wallet_node_taker, taker_funds],
|
|
1742
|
+
[wallet_node_trader, trader_funds],
|
|
1743
|
+
full_node,
|
|
1744
|
+
) = three_wallets_prefarm
|
|
1745
|
+
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1746
|
+
xch_to_cat_amount = uint64(100)
|
|
1747
|
+
|
|
1748
|
+
async with wallet_node_maker.wallet_state_manager.lock:
|
|
1749
|
+
cat_wallet_maker, tx_records = await CATWallet.create_new_cat_wallet(
|
|
1750
|
+
wallet_node_maker.wallet_state_manager,
|
|
1751
|
+
wallet_maker,
|
|
1752
|
+
{"identifier": "genesis_by_id"},
|
|
1753
|
+
xch_to_cat_amount,
|
|
1754
|
+
DEFAULT_TX_CONFIG,
|
|
1735
1755
|
)
|
|
1736
|
-
await trade_manager_maker.wallet_state_manager.add_pending_transactions(txs)
|
|
1737
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
|
|
1738
|
-
await full_node.process_transaction_records(records=txs)
|
|
1739
1756
|
|
|
1740
|
-
|
|
1757
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1741
1758
|
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
[wallet_node_maker, maker_funds],
|
|
1747
|
-
[wallet_node_taker, taker_funds],
|
|
1748
|
-
[wallet_node_trader, trader_funds],
|
|
1749
|
-
full_node,
|
|
1750
|
-
) = three_wallets_prefarm
|
|
1751
|
-
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1752
|
-
xch_to_cat_amount = uint64(100)
|
|
1759
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1760
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1761
|
+
maker_funds -= xch_to_cat_amount
|
|
1762
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1753
1763
|
|
|
1754
|
-
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
|
|
1758
|
-
{"identifier": "genesis_by_id"},
|
|
1759
|
-
xch_to_cat_amount,
|
|
1760
|
-
DEFAULT_TX_CONFIG,
|
|
1761
|
-
)
|
|
1762
|
-
|
|
1763
|
-
tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1764
|
+
chia_for_cat = {
|
|
1765
|
+
wallet_maker.id(): 1000,
|
|
1766
|
+
cat_wallet_maker.id(): -4,
|
|
1767
|
+
}
|
|
1764
1768
|
|
|
1765
|
-
|
|
1769
|
+
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
|
|
1770
|
+
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
|
|
1771
|
+
trade_manager_trader = wallet_node_trader.wallet_state_manager.trade_manager
|
|
1766
1772
|
|
|
1767
|
-
|
|
1768
|
-
|
|
1769
|
-
|
|
1770
|
-
|
|
1773
|
+
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1774
|
+
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make)
|
|
1775
|
+
assert error is None
|
|
1776
|
+
assert success is True
|
|
1777
|
+
assert trade_make is not None
|
|
1778
|
+
peer = wallet_node_taker.get_full_node_peer()
|
|
1779
|
+
offer = Offer.from_bytes(trade_make.offer)
|
|
1780
|
+
[offer], signing_response = await wallet_node_maker.wallet_state_manager.sign_offers([offer])
|
|
1781
|
+
tr1, txs1 = await trade_manager_taker.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1782
|
+
txs1 = await trade_manager_taker.wallet_state_manager.add_pending_transactions(
|
|
1783
|
+
txs1, additional_signing_responses=signing_response
|
|
1784
|
+
)
|
|
1785
|
++
await full_node.wait_transaction_records_entered_mempool(records=txs1)
|
|
1786
|
+
# we shouldn't be able to respond to a duplicate offer
|
|
1787
|
+
with pytest.raises(ValueError):
|
|
1788
|
+
await trade_manager_taker.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1789
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CONFIRM, trade_manager_taker, tr1)
|
|
1790
|
+
# pushing into mempool while already in it should fail
|
|
1791
|
+
[offer], signing_response = await wallet_node_maker.wallet_state_manager.sign_offers([offer])
|
|
1792
|
+
tr2, txs2 = await trade_manager_trader.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1793
|
+
txs2 = await trade_manager_taker.wallet_state_manager.add_pending_transactions(
|
|
1794
|
+
txs2, additional_signing_responses=signing_response
|
|
1795
|
+
)
|
|
1796
|
++
await trade_manager_trader.wallet_state_manager.add_pending_transactions(txs2)
|
|
1797
|
+
assert await trade_manager_trader.get_coins_of_interest()
|
|
1798
|
+
offer_tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1799
|
+
await full_node.process_transaction_records(records=offer_tx_records)
|
|
1800
|
++
await full_node.wait_for_wallet_synced(wallet_node=wallet_node_trader, timeout=20)
|
|
1801
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.FAILED, trade_manager_trader, tr2)
|
|
1771
1802
|
|
|
1772
|
-
chia_for_cat = {
|
|
1773
|
-
wallet_maker.id(): 1000,
|
|
1774
|
-
cat_wallet_maker.id(): -4,
|
|
1775
|
-
}
|
|
1776
1803
|
|
|
1777
|
-
|
|
1778
|
-
|
|
1779
|
-
|
|
1780
|
-
|
|
1781
|
-
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
|
|
1785
|
-
|
|
1786
|
-
|
|
1787
|
-
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
# we shouldn't be able to respond to a duplicate offer
|
|
1797
|
-
with pytest.raises(ValueError):
|
|
1798
|
-
await trade_manager_taker.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1799
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CONFIRM, trade_manager_taker, tr1)
|
|
1800
|
-
# pushing into mempool while already in it should fail
|
|
1801
|
-
tr2, txs2 = await trade_manager_trader.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1802
|
-
await trade_manager_taker.wallet_state_manager.add_pending_transactions(txs2)
|
|
1803
|
-
assert await trade_manager_trader.get_coins_of_interest()
|
|
1804
|
-
offer_tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1805
|
-
await full_node.process_transaction_records(records=offer_tx_records)
|
|
1806
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.FAILED, trade_manager_trader, tr2)
|
|
1807
|
-
|
|
1808
|
-
@pytest.mark.anyio
|
|
1809
|
-
async def test_trade_bad_spend(self, wallets_prefarm):
|
|
1810
|
-
(
|
|
1811
|
-
[wallet_node_maker, maker_funds],
|
|
1812
|
-
[wallet_node_taker, taker_funds],
|
|
1813
|
-
full_node,
|
|
1814
|
-
) = wallets_prefarm
|
|
1815
|
-
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1816
|
-
xch_to_cat_amount = uint64(100)
|
|
1804
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
1805
|
+
@pytest.mark.anyio
|
|
1806
|
+
async def test_trade_bad_spend(wallets_prefarm):
|
|
1807
|
+
(
|
|
1808
|
+
[wallet_node_maker, maker_funds],
|
|
1809
|
+
[wallet_node_taker, taker_funds],
|
|
1810
|
+
full_node,
|
|
1811
|
+
) = wallets_prefarm
|
|
1812
|
+
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1813
|
+
xch_to_cat_amount = uint64(100)
|
|
1814
|
+
|
|
1815
|
+
async with wallet_node_maker.wallet_state_manager.lock:
|
|
1816
|
+
cat_wallet_maker, tx_records = await CATWallet.create_new_cat_wallet(
|
|
1817
|
+
wallet_node_maker.wallet_state_manager,
|
|
1818
|
+
wallet_maker,
|
|
1819
|
+
{"identifier": "genesis_by_id"},
|
|
1820
|
+
xch_to_cat_amount,
|
|
1821
|
+
DEFAULT_TX_CONFIG,
|
|
1822
|
+
)
|
|
1817
1823
|
|
|
1818
|
-
|
|
1819
|
-
cat_wallet_maker, _ = await CATWallet.create_new_cat_wallet(
|
|
1820
|
-
wallet_node_maker.wallet_state_manager,
|
|
1821
|
-
wallet_maker,
|
|
1822
|
-
{"identifier": "genesis_by_id"},
|
|
1823
|
-
xch_to_cat_amount,
|
|
1824
|
-
DEFAULT_TX_CONFIG,
|
|
1825
|
-
)
|
|
1824
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1826
1825
|
|
|
1827
|
-
|
|
1826
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1827
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1828
|
+
maker_funds -= xch_to_cat_amount
|
|
1829
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1828
1830
|
|
|
1829
|
-
|
|
1831
|
+
chia_for_cat = {
|
|
1832
|
+
wallet_maker.id(): 1000,
|
|
1833
|
+
cat_wallet_maker.id(): -4,
|
|
1834
|
+
}
|
|
1830
1835
|
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
maker_funds -= xch_to_cat_amount
|
|
1834
|
-
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1836
|
+
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
|
|
1837
|
+
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
|
|
1835
1838
|
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1839
|
+
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1840
|
+
await time_out_assert(30, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make)
|
|
1841
|
+
assert error is None
|
|
1842
|
+
assert success is True
|
|
1843
|
+
assert trade_make is not None
|
|
1844
|
+
peer = wallet_node_taker.get_full_node_peer()
|
|
1845
|
+
offer = Offer.from_bytes(trade_make.offer)
|
|
1846
|
+
bundle = dataclasses.replace(offer._bundle, aggregated_signature=G2Element())
|
|
1847
|
+
offer = dataclasses.replace(offer, _bundle=bundle)
|
|
1848
|
+
tr1, txs1 = await trade_manager_taker.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1849
|
+
txs1 = await trade_manager_taker.wallet_state_manager.add_pending_transactions(txs1, sign=False)
|
|
1850
|
+
wallet_node_taker.wallet_tx_resend_timeout_secs = 0 # don't wait for resend
|
|
1840
1851
|
|
|
1841
|
-
|
|
1842
|
-
|
|
1843
|
-
|
|
1844
|
-
async def get_trade_and_status(trade_manager, trade) -> TradeStatus:
|
|
1845
|
-
trade_rec = await trade_manager.get_trade_by_id(trade.trade_id)
|
|
1846
|
-
if trade_rec:
|
|
1847
|
-
return TradeStatus(trade_rec.status)
|
|
1848
|
-
raise ValueError("Couldn't find the trade record")
|
|
1849
|
-
|
|
1850
|
-
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1851
|
-
await time_out_assert(30, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make)
|
|
1852
|
-
assert error is None
|
|
1853
|
-
assert success is True
|
|
1854
|
-
assert trade_make is not None
|
|
1855
|
-
peer = wallet_node_taker.get_full_node_peer()
|
|
1856
|
-
offer = Offer.from_bytes(trade_make.offer)
|
|
1857
|
-
bundle = dataclasses.replace(offer._bundle, aggregated_signature=G2Element())
|
|
1858
|
-
offer = dataclasses.replace(offer, _bundle=bundle)
|
|
1859
|
-
tr1, txs1 = await trade_manager_taker.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(10))
|
|
1860
|
-
await trade_manager_taker.wallet_state_manager.add_pending_transactions(txs1)
|
|
1861
|
-
wallet_node_taker.wallet_tx_resend_timeout_secs = 0 # don't wait for resend
|
|
1862
|
-
|
|
1863
|
-
def check_wallet_cache_empty() -> bool:
|
|
1864
|
-
return wallet_node_taker._tx_messages_in_progress == {}
|
|
1865
|
-
|
|
1866
|
-
for _ in range(10):
|
|
1867
|
-
print(await wallet_node_taker._resend_queue())
|
|
1868
|
-
await time_out_assert(5, check_wallet_cache_empty, True)
|
|
1869
|
-
offer_tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1870
|
-
await full_node.process_transaction_records(records=offer_tx_records)
|
|
1871
|
-
await time_out_assert(30, get_trade_and_status, TradeStatus.FAILED, trade_manager_taker, tr1)
|
|
1872
|
-
|
|
1873
|
-
@pytest.mark.anyio
|
|
1874
|
-
async def test_trade_high_fee(self, wallets_prefarm):
|
|
1875
|
-
(
|
|
1876
|
-
[wallet_node_maker, maker_funds],
|
|
1877
|
-
[wallet_node_taker, taker_funds],
|
|
1878
|
-
full_node,
|
|
1879
|
-
) = wallets_prefarm
|
|
1880
|
-
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1881
|
-
xch_to_cat_amount = uint64(100)
|
|
1852
|
+
def check_wallet_cache_empty() -> bool:
|
|
1853
|
+
return wallet_node_taker._tx_messages_in_progress == {}
|
|
1882
1854
|
|
|
1883
|
-
|
|
1884
|
-
|
|
1885
|
-
|
|
1886
|
-
|
|
1887
|
-
|
|
1888
|
-
|
|
1889
|
-
DEFAULT_TX_CONFIG,
|
|
1890
|
-
)
|
|
1855
|
+
for _ in range(10):
|
|
1856
|
+
await wallet_node_taker._resend_queue()
|
|
1857
|
+
await time_out_assert(5, check_wallet_cache_empty, True)
|
|
1858
|
+
offer_tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1859
|
+
await full_node.process_transaction_records(records=offer_tx_records)
|
|
1860
|
+
await time_out_assert(30, get_trade_and_status, TradeStatus.FAILED, trade_manager_taker, tr1)
|
|
1891
1861
|
|
|
1892
|
-
tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1893
1862
|
|
|
1894
|
-
|
|
1863
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
1864
|
+
@pytest.mark.anyio
|
|
1865
|
+
async def test_trade_high_fee(wallets_prefarm):
|
|
1866
|
+
(
|
|
1867
|
+
[wallet_node_maker, maker_funds],
|
|
1868
|
+
[wallet_node_taker, taker_funds],
|
|
1869
|
+
full_node,
|
|
1870
|
+
) = wallets_prefarm
|
|
1871
|
+
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1872
|
+
xch_to_cat_amount = uint64(100)
|
|
1873
|
+
|
|
1874
|
+
async with wallet_node_maker.wallet_state_manager.lock:
|
|
1875
|
+
cat_wallet_maker, tx_records = await CATWallet.create_new_cat_wallet(
|
|
1876
|
+
wallet_node_maker.wallet_state_manager,
|
|
1877
|
+
wallet_maker,
|
|
1878
|
+
{"identifier": "genesis_by_id"},
|
|
1879
|
+
xch_to_cat_amount,
|
|
1880
|
+
DEFAULT_TX_CONFIG,
|
|
1881
|
+
)
|
|
1895
1882
|
|
|
1896
|
-
|
|
1897
|
-
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1898
|
-
maker_funds -= xch_to_cat_amount
|
|
1899
|
-
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1883
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1900
1884
|
|
|
1901
|
-
|
|
1902
|
-
|
|
1903
|
-
|
|
1904
|
-
|
|
1885
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1886
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1887
|
+
maker_funds -= xch_to_cat_amount
|
|
1888
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1905
1889
|
|
|
1906
|
-
|
|
1907
|
-
|
|
1908
|
-
|
|
1909
|
-
|
|
1910
|
-
trade_rec = await trade_manager.get_trade_by_id(trade.trade_id)
|
|
1911
|
-
if trade_rec:
|
|
1912
|
-
return TradeStatus(trade_rec.status)
|
|
1913
|
-
raise ValueError("Couldn't find the trade record")
|
|
1914
|
-
|
|
1915
|
-
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1916
|
-
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make)
|
|
1917
|
-
assert error is None
|
|
1918
|
-
assert success is True
|
|
1919
|
-
assert trade_make is not None
|
|
1920
|
-
peer = wallet_node_taker.get_full_node_peer()
|
|
1921
|
-
offer = Offer.from_bytes(trade_make.offer)
|
|
1922
|
-
tr1, txs1 = await trade_manager_taker.respond_to_offer(
|
|
1923
|
-
offer, peer, DEFAULT_TX_CONFIG, fee=uint64(1000000000000)
|
|
1924
|
-
)
|
|
1925
|
-
await trade_manager_taker.wallet_state_manager.add_pending_transactions(txs1)
|
|
1926
|
-
await full_node.process_transaction_records(records=txs1)
|
|
1927
|
-
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, tr1)
|
|
1890
|
+
chia_for_cat = {
|
|
1891
|
+
wallet_maker.id(): 1000,
|
|
1892
|
+
cat_wallet_maker.id(): -4,
|
|
1893
|
+
}
|
|
1928
1894
|
|
|
1929
|
-
|
|
1930
|
-
|
|
1931
|
-
(
|
|
1932
|
-
[wallet_node_maker, maker_funds],
|
|
1933
|
-
[wallet_node_taker, taker_funds],
|
|
1934
|
-
full_node,
|
|
1935
|
-
) = wallets_prefarm
|
|
1936
|
-
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1937
|
-
xch_to_cat_amount = uint64(100)
|
|
1895
|
+
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
|
|
1896
|
+
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
|
|
1938
1897
|
|
|
1939
|
-
|
|
1940
|
-
|
|
1941
|
-
|
|
1942
|
-
|
|
1943
|
-
|
|
1944
|
-
|
|
1945
|
-
|
|
1946
|
-
|
|
1898
|
+
success, trade_make, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1899
|
+
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make)
|
|
1900
|
+
assert error is None
|
|
1901
|
+
assert success is True
|
|
1902
|
+
assert trade_make is not None
|
|
1903
|
+
peer = wallet_node_taker.get_full_node_peer()
|
|
1904
|
+
offer = Offer.from_bytes(trade_make.offer)
|
|
1905
|
+
[offer], signing_response = await wallet_node_maker.wallet_state_manager.sign_offers(
|
|
1906
|
+
[Offer.from_bytes(trade_make.offer)]
|
|
1907
|
+
)
|
|
1908
|
+
tr1, txs1 = await trade_manager_taker.respond_to_offer(offer, peer, DEFAULT_TX_CONFIG, fee=uint64(1000000000000))
|
|
1909
|
+
txs1 = await trade_manager_taker.wallet_state_manager.add_pending_transactions(
|
|
1910
|
+
txs1, additional_signing_responses=signing_response
|
|
1911
|
+
)
|
|
1912
|
+
await full_node.process_transaction_records(records=txs1)
|
|
1913
|
+
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, tr1)
|
|
1947
1914
|
|
|
1948
|
-
tx_records: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
|
|
1949
1915
|
|
|
1950
|
-
|
|
1916
|
+
@pytest.mark.parametrize("trusted", [True, False])
|
|
1917
|
+
@pytest.mark.anyio
|
|
1918
|
+
async def test_aggregated_trade_state(wallets_prefarm):
|
|
1919
|
+
(
|
|
1920
|
+
[wallet_node_maker, maker_funds],
|
|
1921
|
+
[wallet_node_taker, taker_funds],
|
|
1922
|
+
full_node,
|
|
1923
|
+
) = wallets_prefarm
|
|
1924
|
+
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
|
|
1925
|
+
xch_to_cat_amount = uint64(100)
|
|
1926
|
+
|
|
1927
|
+
async with wallet_node_maker.wallet_state_manager.lock:
|
|
1928
|
+
cat_wallet_maker, tx_records = await CATWallet.create_new_cat_wallet(
|
|
1929
|
+
wallet_node_maker.wallet_state_manager,
|
|
1930
|
+
wallet_maker,
|
|
1931
|
+
{"identifier": "genesis_by_id"},
|
|
1932
|
+
xch_to_cat_amount,
|
|
1933
|
+
DEFAULT_TX_CONFIG,
|
|
1934
|
+
)
|
|
1951
1935
|
|
|
1952
|
-
|
|
1953
|
-
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1954
|
-
maker_funds -= xch_to_cat_amount
|
|
1955
|
-
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1936
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1956
1937
|
|
|
1957
|
-
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
|
|
1961
|
-
cat_for_chia = {
|
|
1962
|
-
wallet_maker.id(): -1,
|
|
1963
|
-
cat_wallet_maker.id(): 1,
|
|
1964
|
-
}
|
|
1938
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount)
|
|
1939
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount)
|
|
1940
|
+
maker_funds -= xch_to_cat_amount
|
|
1941
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
|
|
1965
1942
|
|
|
1966
|
-
|
|
1967
|
-
|
|
1943
|
+
chia_for_cat = {
|
|
1944
|
+
wallet_maker.id(): 2,
|
|
1945
|
+
cat_wallet_maker.id(): -2,
|
|
1946
|
+
}
|
|
1947
|
+
cat_for_chia = {
|
|
1948
|
+
wallet_maker.id(): -1,
|
|
1949
|
+
cat_wallet_maker.id(): 1,
|
|
1950
|
+
}
|
|
1968
1951
|
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
if trade_rec:
|
|
1972
|
-
return TradeStatus(trade_rec.status)
|
|
1973
|
-
raise ValueError("Couldn't find the trade record") # pragma: no cover
|
|
1952
|
+
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
|
|
1953
|
+
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
|
|
1974
1954
|
|
|
1975
|
-
|
|
1976
|
-
|
|
1977
|
-
|
|
1978
|
-
|
|
1979
|
-
|
|
1980
|
-
|
|
1981
|
-
|
|
1982
|
-
|
|
1983
|
-
|
|
1984
|
-
|
|
1985
|
-
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make_2)
|
|
1986
|
-
assert error is None
|
|
1987
|
-
assert success is True
|
|
1988
|
-
assert trade_make_2 is not None
|
|
1955
|
+
success, trade_make_1, _, error = await trade_manager_maker.create_offer_for_ids(chia_for_cat, DEFAULT_TX_CONFIG)
|
|
1956
|
+
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make_1)
|
|
1957
|
+
assert error is None
|
|
1958
|
+
assert success is True
|
|
1959
|
+
assert trade_make_1 is not None
|
|
1960
|
+
success, trade_make_2, _, error = await trade_manager_maker.create_offer_for_ids(cat_for_chia, DEFAULT_TX_CONFIG)
|
|
1961
|
+
await time_out_assert(10, get_trade_and_status, TradeStatus.PENDING_ACCEPT, trade_manager_maker, trade_make_2)
|
|
1962
|
+
assert error is None
|
|
1963
|
+
assert success is True
|
|
1964
|
+
assert trade_make_2 is not None
|
|
1989
1965
|
|
|
1990
|
-
|
|
1966
|
+
[offer_1], signing_response_1 = await wallet_node_maker.wallet_state_manager.sign_offers(
|
|
1967
|
+
[Offer.from_bytes(trade_make_1.offer)]
|
|
1968
|
+
)
|
|
1969
|
+
[offer_2], signing_response_2 = await wallet_node_maker.wallet_state_manager.sign_offers(
|
|
1970
|
+
[Offer.from_bytes(trade_make_2.offer)]
|
|
1971
|
+
)
|
|
1972
|
+
agg_offer = Offer.aggregate([offer_1, offer_2])
|
|
1991
1973
|
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
1997
|
-
|
|
1998
|
-
|
|
1999
|
-
|
|
1974
|
+
peer = wallet_node_taker.get_full_node_peer()
|
|
1975
|
+
trade_take, tx_records = await trade_manager_taker.respond_to_offer(
|
|
1976
|
+
agg_offer,
|
|
1977
|
+
peer,
|
|
1978
|
+
DEFAULT_TX_CONFIG,
|
|
1979
|
+
)
|
|
1980
|
+
assert trade_take is not None
|
|
1981
|
+
assert tx_records is not None
|
|
2000
1982
|
|
|
2001
|
-
|
|
2002
|
-
|
|
2003
|
-
|
|
1983
|
+
tx_records = await trade_manager_taker.wallet_state_manager.add_pending_transactions(
|
|
1984
|
+
tx_records,
|
|
1985
|
+
additional_signing_responses=[*signing_response_1, *signing_response_2],
|
|
1986
|
+
)
|
|
1987
|
+
await full_node.process_transaction_records(records=tx_records)
|
|
1988
|
+
await full_node.wait_for_wallets_synced(wallet_nodes=[wallet_node_maker, wallet_node_taker], timeout=60)
|
|
2004
1989
|
|
|
2005
|
-
|
|
2006
|
-
|
|
2007
|
-
|
|
2008
|
-
|
|
1990
|
+
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds + 1)
|
|
1991
|
+
await time_out_assert(15, wallet_maker.get_unconfirmed_balance, maker_funds + 1)
|
|
1992
|
+
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xch_to_cat_amount - 1)
|
|
1993
|
+
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xch_to_cat_amount - 1)
|
|
@@@ -555,7 -549,7 +555,7 @@@ def test_validator() -> None
|
|
|
555
555
|
1200,
|
|
556
556
|
)
|
|
557
557
|
full_proposal = SINGLETON_MOD.curry(proposal_struct, proposal)
|
|
558
|
--
proposal_amt = 10
|
|
558
|
++
proposal_amt = uint64(10)
|
|
559
559
|
proposal_coin_id = Coin(parent_id, full_proposal.get_tree_hash(), proposal_amt).name()
|
|
560
560
|
solution = Program.to(
|
|
561
561
|
[
|
|
@@@ -696,7 -678,7 +696,7 @@@ def test_merge_p2_singleton() -> None
|
|
|
696
696
|
|
|
697
697
|
# Merge Spend (not output creator)
|
|
698
698
|
output_parent_id = Program.to("output_parent").get_tree_hash()
|
|
699
|
--
output_coin_amount = 100
|
|
699
|
++
output_coin_amount = uint64(100)
|
|
700
700
|
aggregator_sol = Program.to([my_id, my_puzhash, 300, 0, [output_parent_id, output_coin_amount]])
|
|
701
701
|
merge_p2_singleton_sol = Program.to([aggregator_sol, 0, 0, 0, 0])
|
|
702
702
|
conds = conditions_dict_for_solution(p2_singleton, merge_p2_singleton_sol, INFINITE_COST)
|
|
@@@ -706,7 -688,7 +706,7 @@@
|
|
|
706
706
|
|
|
707
707
|
# Merge Spend (output creator)
|
|
708
708
|
fake_parent_id = Program.to("fake_parent").get_tree_hash()
|
|
709
|
--
merged_coin_id = Coin(fake_parent_id, my_puzhash, 200).name()
|
|
709
|
++
merged_coin_id = Coin(fake_parent_id, my_puzhash, uint64(200)).name()
|
|
710
710
|
merge_sol = Program.to([[my_id, my_puzhash, 100, [[fake_parent_id, my_puzhash, 200]], 0]])
|
|
711
711
|
conds = conditions_dict_for_solution(p2_singleton, merge_sol, INFINITE_COST)
|
|
712
712
|
assert len(conds) == 7
|
|
@@@ -719,12 -701,9 +719,12 @@@
|
|
|
719
719
|
Program.to("fake_parent_2").get_tree_hash(),
|
|
720
720
|
Program.to("fake_parent_3").get_tree_hash(),
|
|
721
721
|
]
|
|
722
|
--
amounts = [1000, 2000, 3000]
|
|
723
|
-
parent_puzhash_amounts = [
|
|
724
|
-
merge_coin_ids = [
|
|
722
|
++
amounts = [uint64(1000), uint64(2000), uint64(3000)]
|
|
723
|
+
parent_puzhash_amounts = []
|
|
724
|
+
merge_coin_ids: List[bytes32] = []
|
|
725
|
+
for pid, amt in zip(parent_ids, amounts):
|
|
726
|
+
parent_puzhash_amounts.append([pid, my_puzhash, amt])
|
|
727
|
+
merge_coin_ids.append(Coin(pid, my_puzhash, amt).name())
|
|
725
728
|
|
|
726
729
|
output_parent_amount = [output_parent_id, output_coin_amount]
|
|
727
730
|
output_coin_id = Coin(output_parent_id, my_puzhash, output_coin_amount).name()
|
|
@@@ -843,7 -820,7 +843,7 @@@ def test_treasury() -> None
|
|
|
843
843
|
assert len(conds.as_python()) == 3
|
|
844
844
|
|
|
845
845
|
# Proposal Spend
|
|
846
|
--
proposal_amt = 10
|
|
846
|
++
proposal_amt = uint64(10)
|
|
847
847
|
proposal_coin_id = Coin(parent_id, full_proposal.get_tree_hash(), proposal_amt).name()
|
|
848
848
|
solution = Program.to(
|
|
849
849
|
[
|
|
@@@ -1090,7 -1061,7 +1090,7 @@@ def test_proposal_lifecycle() -> None
|
|
|
1090
1090
|
)
|
|
1091
1091
|
full_proposal: Program = SINGLETON_MOD.curry(proposal_singleton_struct, proposal)
|
|
1092
1092
|
full_proposal_puzhash: bytes32 = full_proposal.get_tree_hash()
|
|
1093
|
--
proposal_amt = 11
|
|
1093
|
++
proposal_amt = uint64(11)
|
|
1094
1094
|
proposal_coin_id = Coin(parent_id, full_proposal_puzhash, proposal_amt).name()
|
|
1095
1095
|
|
|
1096
1096
|
treasury_solution: Program = Program.to(
|
|
@@@ -540,6 -543,6 +540,7 @@@ class TestDIDWallet
|
|
|
540
540
|
spend_bundle = spend_bundle_list[0].spend_bundle
|
|
541
541
|
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
|
|
542
542
|
await full_node_api.farm_blocks_to_wallet(1, wallet2)
|
|
543
|
++
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
543
544
|
|
|
544
545
|
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
|
|
545
546
|
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
|
|
@@@ -1048,6 -1060,6 +1049,11 @@@
|
|
|
1048
1049
|
puzhash = did_wallet_1.did_info.current_inner.get_tree_hash()
|
|
1049
1050
|
parent_num = get_parent_num(did_wallet_1)
|
|
1050
1051
|
|
|
1052
|
++
bad_metadata = {"Twitter": {"url": "http://www.twitter.com"}}
|
|
1053
|
++
with pytest.raises(ValueError) as e:
|
|
1054
|
++
await did_wallet_1.update_metadata(bad_metadata)
|
|
1055
|
++
assert e.match("Metadata key value pairs must be strings.")
|
|
1056
|
++
|
|
1051
1057
|
metadata = {}
|
|
1052
1058
|
metadata["Twitter"] = "http://www.twitter.com"
|
|
1053
1059
|
await did_wallet_1.update_metadata(metadata)
|
|
@@@ -21,7 -21,7 +21,7 @@@ def test_ownership_outer_puzzle() -> No
|
|
|
21
21
|
# (mod (current_owner conditions solution)
|
|
22
22
|
# (list current_owner () conditions)
|
|
23
23
|
# )
|
|
24
|
--
transfer_program = assemble(
|
|
24
|
++
transfer_program = assemble(
|
|
25
25
|
"""
|
|
26
26
|
(c 2 (c () (c 5 ())))
|
|
27
27
|
"""
|
|
@@@ -6,12 -6,13 +6,13 @@@ import loggin
|
|
|
6
6
|
import pytest
|
|
7
7
|
|
|
8
8
|
from chia.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
|
|
9
|
++
from chia.data_layer.data_layer_util import DLProof, HashOnlyProof, ProofLayer, StoreProofsHashes
|
|
9
10
|
from chia.data_layer.data_layer_wallet import Mirror
|
|
10
11
|
from chia.rpc.wallet_rpc_client import WalletRpcClient
|
|
11
|
-
from chia.simulator.setup_nodes import SimulatorsAndWalletsServices
|
|
12
12
|
from chia.simulator.simulator_protocol import FarmNewBlockProtocol
|
|
13
13
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
14
14
|
from chia.types.peer_info import PeerInfo
|
|
15
|
--
from chia.util.ints import uint32, uint64
|
|
15
|
++
from chia.util.ints import uint8, uint32, uint64
|
|
16
16
|
from chia.wallet.db_wallet.db_wallet_puzzles import create_mirror_puzzle
|
|
17
17
|
from tests.conftest import ConsensusMode
|
|
18
18
|
from tests.util.rpc import validate_get_routes
|
|
@@@ -222,3 -222,3 +223,57 @@@ class TestWalletRpc
|
|
|
222
223
|
client_2.close()
|
|
223
224
|
await client.await_closed()
|
|
224
225
|
await client_2.await_closed()
|
|
226
|
++
|
|
227
|
++
@pytest.mark.limit_consensus_modes(allowed=[ConsensusMode.PLAIN, ConsensusMode.HARD_FORK_2_0], reason="save time")
|
|
228
|
++
@pytest.mark.parametrize("trusted", [True, False])
|
|
229
|
++
@pytest.mark.anyio
|
|
230
|
++
async def test_wallet_dl_verify_proof(
|
|
231
|
++
self, one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, trusted: bool, self_hostname: str
|
|
232
|
++
) -> None:
|
|
233
|
++
[full_node_service], [wallet_service], bt = one_wallet_and_one_simulator_services
|
|
234
|
++
full_node_api = full_node_service._api
|
|
235
|
++
full_node_server = full_node_api.full_node.server
|
|
236
|
++
wallet_node = wallet_service._node
|
|
237
|
++
|
|
238
|
++
# Create fake proof
|
|
239
|
++
# Specifically
|
|
240
|
++
fakeproof = HashOnlyProof.from_key_value(
|
|
241
|
++
key=b"key",
|
|
242
|
++
value=b"value",
|
|
243
|
++
node_hash=bytes32([1] * 32),
|
|
244
|
++
layers=[
|
|
245
|
++
ProofLayer(
|
|
246
|
++
other_hash_side=uint8(0),
|
|
247
|
++
other_hash=bytes32([1] * 32),
|
|
248
|
++
combined_hash=bytes32([1] * 32),
|
|
249
|
++
),
|
|
250
|
++
],
|
|
251
|
++
)
|
|
252
|
++
fake_coin_id = bytes32([5] * 32)
|
|
253
|
++
fake_gpr = DLProof(
|
|
254
|
++
store_proofs=StoreProofsHashes(store_id=bytes32([1] * 32), proofs=[fakeproof]),
|
|
255
|
++
coin_id=fake_coin_id,
|
|
256
|
++
inner_puzzle_hash=bytes32([1] * 32),
|
|
257
|
++
)
|
|
258
|
++
|
|
259
|
++
if trusted:
|
|
260
|
++
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
|
|
261
|
++
else:
|
|
262
|
++
wallet_node.config["trusted_peers"] = {}
|
|
263
|
++
|
|
264
|
++
assert wallet_service.rpc_server is not None
|
|
265
|
++
client = await WalletRpcClient.create(
|
|
266
|
++
self_hostname,
|
|
267
|
++
wallet_service.rpc_server.listen_port,
|
|
268
|
++
wallet_service.root_path,
|
|
269
|
++
wallet_service.config,
|
|
270
|
++
)
|
|
271
|
++
|
|
272
|
++
with pytest.raises(ValueError, match="No peer connected"):
|
|
273
|
++
await wallet_service.rpc_server.rpc_api.dl_verify_proof(fake_gpr.to_json_dict())
|
|
274
|
++
|
|
275
|
++
await wallet_node.server.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
276
|
++
await validate_get_routes(client, wallet_service.rpc_server.rpc_api)
|
|
277
|
++
|
|
278
|
++
with pytest.raises(ValueError, match=f"Invalid Proof: No DL singleton found at coin id: {fake_coin_id}"):
|
|
279
|
++
await client.dl_verify_proof(fake_gpr)
|
|
@@@ -1641,6 -1592,6 +1641,45 @@@ async def test_nft_endpoints(wallet_rpc
|
|
|
1641
1641
|
}
|
|
1642
1642
|
|
|
1643
1643
|
|
|
1644
|
++
async def _check_delete_key(
|
|
1645
|
++
client: WalletRpcClient, wallet_node: WalletNode, farmer_fp: int, pool_fp: int, observer: bool = False
|
|
1646
|
++
) -> None:
|
|
1647
|
++
# Add in reward addresses into farmer and pool for testing delete key checks
|
|
1648
|
++
# set farmer to first private key
|
|
1649
|
++
create_sk = master_sk_to_wallet_sk_unhardened if observer else master_sk_to_wallet_sk
|
|
1650
|
++
|
|
1651
|
++
sk = await wallet_node.get_key_for_fingerprint(farmer_fp)
|
|
1652
|
++
assert sk is not None
|
|
1653
|
++
farmer_ph = create_puzzlehash_for_pk(create_sk(sk, uint32(0)).get_g1())
|
|
1654
|
++
|
|
1655
|
++
sk = await wallet_node.get_key_for_fingerprint(pool_fp)
|
|
1656
|
++
assert sk is not None
|
|
1657
|
++
pool_ph = create_puzzlehash_for_pk(create_sk(sk, uint32(0)).get_g1())
|
|
1658
|
++
|
|
1659
|
++
with lock_and_load_config(wallet_node.root_path, "config.yaml") as test_config:
|
|
1660
|
++
test_config["farmer"]["xch_target_address"] = encode_puzzle_hash(farmer_ph, "txch")
|
|
1661
|
++
test_config["pool"]["xch_target_address"] = encode_puzzle_hash(pool_ph, "txch")
|
|
1662
|
++
save_config(wallet_node.root_path, "config.yaml", test_config)
|
|
1663
|
++
|
|
1664
|
++
# Check farmer_fp key
|
|
1665
|
++
sk_dict = await client.check_delete_key(farmer_fp)
|
|
1666
|
++
assert sk_dict["fingerprint"] == farmer_fp
|
|
1667
|
++
assert sk_dict["used_for_farmer_rewards"] is True
|
|
1668
|
++
assert sk_dict["used_for_pool_rewards"] is False
|
|
1669
|
++
|
|
1670
|
++
# Check pool_fp key
|
|
1671
|
++
sk_dict = await client.check_delete_key(pool_fp)
|
|
1672
|
++
assert sk_dict["fingerprint"] == pool_fp
|
|
1673
|
++
assert sk_dict["used_for_farmer_rewards"] is False
|
|
1674
|
++
assert sk_dict["used_for_pool_rewards"] is True
|
|
1675
|
++
|
|
1676
|
++
# Check unknown key
|
|
1677
|
++
sk_dict = await client.check_delete_key(123456, 10)
|
|
1678
|
++
assert sk_dict["fingerprint"] == 123456
|
|
1679
|
++
assert sk_dict["used_for_farmer_rewards"] is False
|
|
1680
|
++
assert sk_dict["used_for_pool_rewards"] is False
|
|
1681
|
++
|
|
1682
|
++
|
|
1644
1683
|
@pytest.mark.limit_consensus_modes(allowed=[ConsensusMode.PLAIN, ConsensusMode.HARD_FORK_2_0], reason="save time")
|
|
1645
1684
|
@pytest.mark.anyio
|
|
1646
1685
|
async def test_key_and_address_endpoints(wallet_rpc_environment: WalletRpcTestEnvironment):
|
|
@@@ -1689,67 -1640,67 +1728,32 @@@
|
|
|
1689
1728
|
sk_dict = await client.get_private_key(pks[1])
|
|
1690
1729
|
assert sk_dict["fingerprint"] == pks[1]
|
|
1691
1730
|
|
|
1692
|
--
#
|
|
1693
|
--
|
|
1694
|
--
|
|
1695
|
--
|
|
1696
|
--
|
|
1731
|
++
# test hardened keys
|
|
1732
|
++
await _check_delete_key(client=client, wallet_node=wallet_node, farmer_fp=pks[0], pool_fp=pks[1], observer=False)
|
|
1733
|
++
|
|
1734
|
++
# test observer keys
|
|
1735
|
++
await _check_delete_key(client=client, wallet_node=wallet_node, farmer_fp=pks[0], pool_fp=pks[1], observer=True)
|
|
1736
|
++
|
|
1737
|
++
# set farmer to empty string
|
|
1697
1738
|
with lock_and_load_config(wallet_node.root_path, "config.yaml") as test_config:
|
|
1698
|
--
test_config["farmer"]["xch_target_address"] =
|
|
1699
|
--
# set pool to second private key
|
|
1700
|
--
sk = await wallet_node.get_key_for_fingerprint(pks[1])
|
|
1701
|
--
assert sk is not None
|
|
1702
|
--
test_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(0)).get_g1())
|
|
1703
|
--
test_config["pool"]["xch_target_address"] = encode_puzzle_hash(test_ph, "txch")
|
|
1739
|
++
test_config["farmer"]["xch_target_address"] = ""
|
|
1704
1740
|
save_config(wallet_node.root_path, "config.yaml", test_config)
|
|
1705
1741
|
|
|
1706
|
--
# Check
|
|
1707
|
--
sk_dict = await client.check_delete_key(pks[0])
|
|
1708
|
--
assert sk_dict["fingerprint"] == pks[0]
|
|
1709
|
--
assert sk_dict["used_for_farmer_rewards"] is True
|
|
1710
|
--
assert sk_dict["used_for_pool_rewards"] is False
|
|
1711
|
--
|
|
1712
|
--
# Check second key
|
|
1742
|
++
# Check key
|
|
1713
1743
|
sk_dict = await client.check_delete_key(pks[1])
|
|
1714
1744
|
assert sk_dict["fingerprint"] == pks[1]
|
|
1715
1745
|
assert sk_dict["used_for_farmer_rewards"] is False
|
|
1716
1746
|
assert sk_dict["used_for_pool_rewards"] is True
|
|
1717
1747
|
|
|
1718
|
--
#
|
|
1719
|
--
sk_dict = await client.check_delete_key(123456, 10)
|
|
1720
|
--
assert sk_dict["fingerprint"] == 123456
|
|
1721
|
--
assert sk_dict["used_for_farmer_rewards"] is False
|
|
1722
|
--
assert sk_dict["used_for_pool_rewards"] is False
|
|
1723
|
--
|
|
1724
|
--
# Add in observer reward addresses into farmer and pool for testing delete key checks
|
|
1725
|
--
# set farmer to first private key
|
|
1726
|
--
sk = await wallet_node.get_key_for_fingerprint(pks[0])
|
|
1727
|
--
assert sk is not None
|
|
1728
|
--
test_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk_unhardened(sk, uint32(0)).get_g1())
|
|
1748
|
++
# set farmer and pool to empty string
|
|
1729
1749
|
with lock_and_load_config(wallet_node.root_path, "config.yaml") as test_config:
|
|
1730
|
--
test_config["farmer"]["xch_target_address"] =
|
|
1731
|
--
|
|
1732
|
--
sk = await wallet_node.get_key_for_fingerprint(pks[1])
|
|
1733
|
--
assert sk is not None
|
|
1734
|
--
test_ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk_unhardened(sk, uint32(0)).get_g1())
|
|
1735
|
--
test_config["pool"]["xch_target_address"] = encode_puzzle_hash(test_ph, "txch")
|
|
1750
|
++
test_config["farmer"]["xch_target_address"] = ""
|
|
1751
|
++
test_config["pool"]["xch_target_address"] = ""
|
|
1736
1752
|
save_config(wallet_node.root_path, "config.yaml", test_config)
|
|
1737
1753
|
|
|
1738
|
--
# Check
|
|
1754
|
++
# Check key
|
|
1739
1755
|
sk_dict = await client.check_delete_key(pks[0])
|
|
1740
1756
|
assert sk_dict["fingerprint"] == pks[0]
|
|
1741
|
--
assert sk_dict["used_for_farmer_rewards"] is True
|
|
1742
|
--
assert sk_dict["used_for_pool_rewards"] is False
|
|
1743
|
--
|
|
1744
|
--
# Check second key
|
|
1745
|
--
sk_dict = await client.check_delete_key(pks[1])
|
|
1746
|
--
assert sk_dict["fingerprint"] == pks[1]
|
|
1747
|
--
assert sk_dict["used_for_farmer_rewards"] is False
|
|
1748
|
--
assert sk_dict["used_for_pool_rewards"] is True
|
|
1749
|
--
|
|
1750
|
--
# Check unknown key
|
|
1751
|
--
sk_dict = await client.check_delete_key(123456, 10)
|
|
1752
|
--
assert sk_dict["fingerprint"] == 123456
|
|
1753
1757
|
assert sk_dict["used_for_farmer_rewards"] is False
|
|
1754
1758
|
assert sk_dict["used_for_pool_rewards"] is False
|
|
1755
1759
|
|
|
@@@ -40,595 -50,486 +40,595 @@@ async def get_all_messages_in_queue(que
|
|
|
40
40
|
return all_messages
|
|
41
41
|
|
|
42
42
|
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
fn_server = full_node_api.full_node.server
|
|
51
|
-
wsm: WalletStateManager = wallet_node.wallet_state_manager
|
|
52
|
-
|
|
53
|
-
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
54
|
-
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
55
|
-
|
|
56
|
-
zero_ph = 32 * b"\0"
|
|
57
|
-
junk_ph = 32 * b"\a"
|
|
58
|
-
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
59
|
-
msg = wallet_protocol.RegisterForPhUpdates([zero_ph], 0)
|
|
60
|
-
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
61
|
-
|
|
62
|
-
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
63
|
-
data_response: RespondToPhUpdates = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
64
|
-
assert data_response.coin_states == []
|
|
65
|
-
|
|
66
|
-
# Farm few more with reward
|
|
67
|
-
for i in range(0, num_blocks):
|
|
68
|
-
if i == num_blocks - 1:
|
|
69
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
70
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
71
|
-
else:
|
|
72
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
73
|
-
|
|
74
|
-
msg = wallet_protocol.RegisterForPhUpdates([zero_ph], 0)
|
|
75
|
-
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
76
|
-
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
77
|
-
data_response: RespondToPhUpdates = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
78
|
-
# we have already subscribed to this puzzle hash, it will be ignored
|
|
79
|
-
# we still receive the updates (see below)
|
|
80
|
-
assert data_response.coin_states == []
|
|
81
|
-
|
|
82
|
-
# Farm more rewards to check the incoming queue for the updates
|
|
83
|
-
for i in range(0, num_blocks):
|
|
84
|
-
if i == num_blocks - 1:
|
|
85
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
86
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
87
|
-
else:
|
|
88
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
89
|
-
|
|
90
|
-
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
91
|
-
|
|
92
|
-
zero_coin = await full_node_api.full_node.coin_store.get_coin_states_by_puzzle_hashes(True, {zero_ph})
|
|
93
|
-
all_zero_coin = set(zero_coin)
|
|
94
|
-
notified_zero_coins = set()
|
|
95
|
-
|
|
96
|
-
for message in all_messages:
|
|
97
|
-
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
98
|
-
data_response: CoinStateUpdate = CoinStateUpdate.from_bytes(message.data)
|
|
99
|
-
for coin_state in data_response.items:
|
|
100
|
-
notified_zero_coins.add(coin_state)
|
|
101
|
-
assert len(data_response.items) == 2 # 2 per height farmer / pool reward
|
|
102
|
-
|
|
103
|
-
assert all_zero_coin == notified_zero_coins
|
|
104
|
-
|
|
105
|
-
# Test subscribing to more coins
|
|
106
|
-
one_ph = 32 * b"\1"
|
|
107
|
-
msg = wallet_protocol.RegisterForPhUpdates([one_ph], 0)
|
|
108
|
-
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
109
|
-
peak = full_node_api.full_node.blockchain.get_peak()
|
|
110
|
-
|
|
111
|
-
for i in range(0, num_blocks):
|
|
112
|
-
if i == num_blocks - 1:
|
|
113
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
114
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
115
|
-
else:
|
|
116
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
117
|
-
|
|
118
|
-
for i in range(0, num_blocks):
|
|
119
|
-
if i == num_blocks - 1:
|
|
120
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(one_ph))
|
|
121
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
122
|
-
else:
|
|
123
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(one_ph))
|
|
124
|
-
|
|
125
|
-
zero_coins = await full_node_api.full_node.coin_store.get_coin_states_by_puzzle_hashes(
|
|
126
|
-
True, {zero_ph}, peak.height + 1
|
|
127
|
-
)
|
|
128
|
-
one_coins = await full_node_api.full_node.coin_store.get_coin_states_by_puzzle_hashes(True, {one_ph})
|
|
129
|
-
|
|
130
|
-
all_coins = set(zero_coins)
|
|
131
|
-
all_coins.update(one_coins)
|
|
132
|
-
|
|
133
|
-
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
134
|
-
|
|
135
|
-
notified_all_coins = set()
|
|
136
|
-
|
|
137
|
-
for message in all_messages:
|
|
138
|
-
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
139
|
-
data_response: CoinStateUpdate = CoinStateUpdate.from_bytes(message.data)
|
|
140
|
-
for coin_state in data_response.items:
|
|
141
|
-
notified_all_coins.add(coin_state)
|
|
142
|
-
assert len(data_response.items) == 2 # 2 per height farmer / pool reward
|
|
143
|
-
|
|
144
|
-
assert all_coins == notified_all_coins
|
|
43
|
+
@pytest.mark.anyio
|
|
44
|
+
async def test_subscribe_for_ph(simulator_and_wallet: OldSimulatorsAndWallets, self_hostname: str) -> None:
|
|
45
|
+
num_blocks = 4
|
|
46
|
+
full_nodes, wallets, _ = simulator_and_wallet
|
|
47
|
+
full_node_api = full_nodes[0]
|
|
48
|
+
wallet_node, server_2 = wallets[0]
|
|
49
|
+
fn_server = full_node_api.full_node.server
|
|
145
50
|
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
puzzle_hash = await wallet.get_new_puzzlehash()
|
|
51
|
+
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
52
|
+
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
149
53
|
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
else:
|
|
155
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(puzzle_hash))
|
|
54
|
+
junk_ph = bytes32(32 * b"\a")
|
|
55
|
+
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
56
|
+
msg = wallet_protocol.RegisterForPhUpdates([zero_ph], uint32(0))
|
|
57
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
156
58
|
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
for i in range(1, num_blocks + 1)
|
|
161
|
-
]
|
|
162
|
-
)
|
|
163
|
-
fn_amount = sum(
|
|
164
|
-
cr.coin.amount
|
|
165
|
-
for cr in await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(False, puzzle_hash)
|
|
166
|
-
)
|
|
59
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
60
|
+
data_response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
61
|
+
assert data_response.coin_states == []
|
|
167
62
|
|
|
168
|
-
|
|
169
|
-
|
|
63
|
+
# Farm few more with reward
|
|
64
|
+
for i in range(num_blocks):
|
|
65
|
+
if i == num_blocks - 1:
|
|
66
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
67
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
68
|
+
else:
|
|
69
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
170
70
|
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
71
|
+
msg = wallet_protocol.RegisterForPhUpdates([zero_ph], uint32(0))
|
|
72
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
73
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
74
|
+
data_response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
75
|
+
# we have already subscribed to this puzzle hash, it will be ignored
|
|
76
|
+
# we still receive the updates (see below)
|
|
77
|
+
assert data_response.coin_states == []
|
|
78
|
+
|
|
79
|
+
# Farm more rewards to check the incoming queue for the updates
|
|
80
|
+
for i in range(num_blocks):
|
|
81
|
+
if i == num_blocks - 1:
|
|
82
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
83
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
84
|
+
else:
|
|
85
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
176
86
|
|
|
177
|
-
|
|
87
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
178
88
|
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
assert spent_coin.puzzle_hash == puzzle_hash
|
|
89
|
+
zero_coin = await full_node_api.full_node.coin_store.get_coin_states_by_puzzle_hashes(True, {zero_ph})
|
|
90
|
+
all_zero_coin = set(zero_coin)
|
|
91
|
+
notified_zero_coins = set()
|
|
183
92
|
|
|
184
|
-
|
|
93
|
+
for message in all_messages:
|
|
94
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
95
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
96
|
+
assert len(coin_state_update.items) == 2 # 2 per height farmer / pool reward
|
|
97
|
+
for coin_state in coin_state_update.items:
|
|
98
|
+
notified_zero_coins.add(coin_state)
|
|
185
99
|
|
|
186
|
-
|
|
100
|
+
assert all_zero_coin == notified_zero_coins
|
|
187
101
|
|
|
188
|
-
|
|
189
|
-
|
|
102
|
+
# Test subscribing to more coins
|
|
103
|
+
one_ph = bytes32(32 * b"\1")
|
|
104
|
+
msg = wallet_protocol.RegisterForPhUpdates([one_ph], uint32(0))
|
|
105
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
106
|
+
peak = full_node_api.full_node.blockchain.get_peak()
|
|
190
107
|
|
|
191
|
-
|
|
108
|
+
for i in range(num_blocks):
|
|
109
|
+
if i == num_blocks - 1:
|
|
110
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
111
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
112
|
+
else:
|
|
113
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
192
114
|
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
115
|
+
for i in range(num_blocks):
|
|
116
|
+
if i == num_blocks - 1:
|
|
117
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(one_ph))
|
|
118
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
119
|
+
else:
|
|
120
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(one_ph))
|
|
197
121
|
|
|
198
|
-
|
|
122
|
+
assert peak is not None
|
|
123
|
+
zero_coins = await full_node_api.full_node.coin_store.get_coin_states_by_puzzle_hashes(
|
|
124
|
+
True, {zero_ph}, uint32(peak.height + 1)
|
|
125
|
+
)
|
|
126
|
+
one_coins = await full_node_api.full_node.coin_store.get_coin_states_by_puzzle_hashes(True, {one_ph})
|
|
199
127
|
|
|
200
|
-
|
|
128
|
+
all_coins = set(zero_coins)
|
|
129
|
+
all_coins.update(one_coins)
|
|
201
130
|
|
|
202
|
-
|
|
203
|
-
[tx_record] = await wallet.generate_signed_transaction(uint64(10), junk_ph, DEFAULT_TX_CONFIG, uint64(0))
|
|
204
|
-
await wallet_node.wallet_state_manager.add_pending_transactions([tx_record])
|
|
131
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
205
132
|
|
|
206
|
-
|
|
133
|
+
notified_all_coins = set()
|
|
207
134
|
|
|
208
|
-
|
|
135
|
+
for message in all_messages:
|
|
136
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
137
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
138
|
+
assert len(coin_state_update.items) == 2 # 2 per height farmer / pool reward
|
|
139
|
+
for coin_state in coin_state_update.items:
|
|
140
|
+
notified_all_coins.add(coin_state)
|
|
209
141
|
|
|
210
|
-
|
|
142
|
+
assert all_coins == notified_all_coins
|
|
211
143
|
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
for coin_state in data_response.items:
|
|
216
|
-
if coin_state.coin.name() == spent_coin.name():
|
|
217
|
-
notified_state = coin_state
|
|
144
|
+
wallet = wallet_node.wallet_state_manager.wallets[uint32(1)]
|
|
145
|
+
assert isinstance(wallet, Wallet)
|
|
146
|
+
puzzle_hash = await wallet.get_new_puzzlehash()
|
|
218
147
|
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
async def test_subscribe_for_coin_id(self, simulator_and_wallet, self_hostname):
|
|
225
|
-
num_blocks = 4
|
|
226
|
-
full_nodes, wallets, _ = simulator_and_wallet
|
|
227
|
-
full_node_api = full_nodes[0]
|
|
228
|
-
wallet_node, server_2 = wallets[0]
|
|
229
|
-
fn_server = full_node_api.full_node.server
|
|
230
|
-
wsm: WalletStateManager = wallet_node.wallet_state_manager
|
|
231
|
-
standard_wallet: Wallet = wsm.wallets[1]
|
|
232
|
-
puzzle_hash = await standard_wallet.get_new_puzzlehash()
|
|
233
|
-
|
|
234
|
-
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
235
|
-
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
236
|
-
|
|
237
|
-
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
238
|
-
|
|
239
|
-
# Farm to create a coin that we'll track
|
|
240
|
-
for i in range(0, num_blocks):
|
|
148
|
+
for i in range(num_blocks):
|
|
149
|
+
if i == num_blocks - 1:
|
|
150
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(puzzle_hash))
|
|
151
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(junk_ph))
|
|
152
|
+
else:
|
|
241
153
|
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(puzzle_hash))
|
|
242
154
|
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
155
|
+
funds = sum(
|
|
156
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks + 1)]
|
|
157
|
+
)
|
|
158
|
+
fn_amount = sum(
|
|
159
|
+
cr.coin.amount
|
|
160
|
+
for cr in await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(False, puzzle_hash)
|
|
161
|
+
)
|
|
246
162
|
|
|
247
|
-
|
|
163
|
+
await time_out_assert(20, wallet.get_confirmed_balance, funds)
|
|
164
|
+
assert funds == fn_amount
|
|
248
165
|
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
166
|
+
msg_1 = wallet_protocol.RegisterForPhUpdates([puzzle_hash], uint32(0))
|
|
167
|
+
msg_response_1 = await full_node_api.register_interest_in_puzzle_hash(msg_1, fake_wallet_peer)
|
|
168
|
+
assert msg_response_1.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
169
|
+
data_response_1 = RespondToCoinUpdates.from_bytes(msg_response_1.data)
|
|
170
|
+
assert len(data_response_1.coin_states) == 2 * num_blocks # 2 per height farmer / pool reward
|
|
253
171
|
|
|
254
|
-
|
|
255
|
-
msg_response = await full_node_api.register_interest_in_coin(msg, fake_wallet_peer)
|
|
256
|
-
assert msg_response is not None
|
|
257
|
-
assert msg_response.type == ProtocolMessageTypes.respond_to_coin_update.value
|
|
258
|
-
data_response: RespondToCoinUpdates = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
259
|
-
assert data_response.coin_states[0].coin == coin_to_spend
|
|
172
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
260
173
|
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
await standard_wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
174
|
+
[tx_record] = await wallet.generate_signed_transaction(uint64(10), puzzle_hash, DEFAULT_TX_CONFIG, uint64(0))
|
|
175
|
+
assert tx_record.spend_bundle is not None
|
|
176
|
+
assert len(tx_record.spend_bundle.removals()) == 1
|
|
177
|
+
spent_coin = tx_record.spend_bundle.removals()[0]
|
|
178
|
+
assert spent_coin.puzzle_hash == puzzle_hash
|
|
267
179
|
|
|
268
|
-
|
|
180
|
+
[tx_record] = await wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
269
181
|
|
|
270
|
-
|
|
182
|
+
await full_node_api.process_transaction_records(records=[tx_record])
|
|
271
183
|
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
275
|
-
data_response: CoinStateUpdate = CoinStateUpdate.from_bytes(message.data)
|
|
276
|
-
for coin_state in data_response.items:
|
|
277
|
-
notified_coins.add(coin_state.coin)
|
|
278
|
-
assert coin_state.spent_height is not None
|
|
184
|
+
# Let's make sure the wallet can handle a non ephemeral launcher
|
|
185
|
+
from chia.wallet.puzzles.singleton_top_layer import SINGLETON_LAUNCHER_HASH
|
|
279
186
|
|
|
280
|
-
|
|
187
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
281
188
|
|
|
282
|
-
|
|
283
|
-
|
|
189
|
+
[tx_record] = await wallet.generate_signed_transaction(
|
|
190
|
+
uint64(10), SINGLETON_LAUNCHER_HASH, DEFAULT_TX_CONFIG, uint64(0)
|
|
191
|
+
)
|
|
192
|
+
[tx_record] = await wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
284
193
|
|
|
285
|
-
|
|
286
|
-
uint64(10), puzzle_hash, DEFAULT_TX_CONFIG, uint64(0)
|
|
287
|
-
)
|
|
194
|
+
await full_node_api.process_transaction_records(records=[tx_record])
|
|
288
195
|
|
|
289
|
-
|
|
196
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
290
197
|
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
added_target = coin
|
|
198
|
+
# Send a transaction to make sure the wallet is still running
|
|
199
|
+
[tx_record] = await wallet.generate_signed_transaction(uint64(10), junk_ph, DEFAULT_TX_CONFIG, uint64(0))
|
|
200
|
+
[tx_record] = await wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
295
201
|
|
|
296
|
-
|
|
202
|
+
await full_node_api.process_transaction_records(records=[tx_record])
|
|
297
203
|
|
|
298
|
-
|
|
299
|
-
msg_response = await full_node_api.register_interest_in_coin(msg, fake_wallet_peer)
|
|
300
|
-
assert msg_response is not None
|
|
301
|
-
assert msg_response.type == ProtocolMessageTypes.respond_to_coin_update.value
|
|
302
|
-
data_response: RespondToCoinUpdates = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
303
|
-
assert len(data_response.coin_states) == 0
|
|
204
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
304
205
|
|
|
305
|
-
|
|
206
|
+
notified_state = None
|
|
306
207
|
|
|
307
|
-
|
|
208
|
+
for message in all_messages:
|
|
209
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
210
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
211
|
+
for coin_state in coin_state_update.items:
|
|
212
|
+
if coin_state.coin.name() == spent_coin.name():
|
|
213
|
+
notified_state = coin_state
|
|
308
214
|
|
|
309
|
-
|
|
215
|
+
assert notified_state is not None
|
|
216
|
+
assert notified_state.coin == spent_coin
|
|
217
|
+
assert notified_state.spent_height is not None
|
|
310
218
|
|
|
311
|
-
notified_state = None
|
|
312
219
|
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
220
|
+
@pytest.mark.anyio
|
|
221
|
+
async def test_subscribe_for_coin_id(simulator_and_wallet: OldSimulatorsAndWallets, self_hostname: str) -> None:
|
|
222
|
+
num_blocks = 4
|
|
223
|
+
full_nodes, wallets, _ = simulator_and_wallet
|
|
224
|
+
full_node_api = full_nodes[0]
|
|
225
|
+
wallet_node, server_2 = wallets[0]
|
|
226
|
+
fn_server = full_node_api.full_node.server
|
|
227
|
+
standard_wallet = wallet_node.wallet_state_manager.wallets[uint32(1)]
|
|
228
|
+
assert isinstance(standard_wallet, Wallet)
|
|
229
|
+
puzzle_hash = await standard_wallet.get_new_puzzlehash()
|
|
319
230
|
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
assert notified_state.spent_height is None
|
|
323
|
-
|
|
324
|
-
@pytest.mark.anyio
|
|
325
|
-
async def test_subscribe_for_ph_reorg(self, simulator_and_wallet, self_hostname):
|
|
326
|
-
num_blocks = 4
|
|
327
|
-
long_blocks = 20
|
|
328
|
-
full_nodes, wallets, _ = simulator_and_wallet
|
|
329
|
-
full_node_api = full_nodes[0]
|
|
330
|
-
wallet_node, server_2 = wallets[0]
|
|
331
|
-
fn_server = full_node_api.full_node.server
|
|
332
|
-
wsm: WalletStateManager = wallet_node.wallet_state_manager
|
|
333
|
-
standard_wallet: Wallet = wsm.wallets[1]
|
|
334
|
-
puzzle_hash = await standard_wallet.get_new_puzzlehash()
|
|
335
|
-
|
|
336
|
-
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
337
|
-
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
338
|
-
|
|
339
|
-
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
340
|
-
zero_ph = 32 * b"\0"
|
|
341
|
-
|
|
342
|
-
# Farm to create a coin that we'll track
|
|
343
|
-
for i in range(0, num_blocks):
|
|
344
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
231
|
+
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
232
|
+
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
345
233
|
|
|
346
|
-
|
|
347
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
234
|
+
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
348
235
|
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
assert msg_response is not None
|
|
236
|
+
# Farm to create a coin that we'll track
|
|
237
|
+
for _ in range(num_blocks):
|
|
352
238
|
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(puzzle_hash))
|
|
353
239
|
|
|
354
|
-
|
|
355
|
-
|
|
240
|
+
funds = sum(
|
|
241
|
+
[calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
|
|
242
|
+
)
|
|
356
243
|
|
|
357
|
-
|
|
358
|
-
await time_out_assert(20, full_node_api.full_node.blockchain.get_peak_height, expected_height)
|
|
244
|
+
await time_out_assert(20, standard_wallet.get_confirmed_balance, funds)
|
|
359
245
|
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
fork_height = expected_height - num_blocks - 5
|
|
363
|
-
req = ReorgProtocol(fork_height, expected_height + 5, zero_ph, None)
|
|
364
|
-
await full_node_api.reorg_from_index_to_new_index(req)
|
|
246
|
+
my_coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash)
|
|
247
|
+
coin_to_spend = my_coins[0].coin
|
|
365
248
|
|
|
366
|
-
|
|
367
|
-
|
|
249
|
+
msg = wallet_protocol.RegisterForCoinUpdates([coin_to_spend.name()], uint32(0))
|
|
250
|
+
msg_response = await full_node_api.register_interest_in_coin(msg, fake_wallet_peer)
|
|
251
|
+
assert msg_response is not None
|
|
252
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_coin_update.value
|
|
253
|
+
data_response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
254
|
+
assert data_response.coin_states[0].coin == coin_to_spend
|
|
368
255
|
|
|
369
|
-
|
|
256
|
+
coins = set()
|
|
257
|
+
coins.add(coin_to_spend)
|
|
258
|
+
[tx_record] = await standard_wallet.generate_signed_transaction(
|
|
259
|
+
uint64(10), puzzle_hash, DEFAULT_TX_CONFIG, uint64(0), coins=coins
|
|
260
|
+
)
|
|
261
|
+
[tx_record] = await standard_wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
370
262
|
|
|
371
|
-
|
|
372
|
-
for message in all_messages:
|
|
373
|
-
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
374
|
-
data_response: CoinStateUpdate = CoinStateUpdate.from_bytes(message.data)
|
|
375
|
-
coin_update_messages.append(data_response)
|
|
376
|
-
|
|
377
|
-
# First state is creation, second one is a reorg
|
|
378
|
-
assert len(coin_update_messages) == 2
|
|
379
|
-
first = coin_update_messages[0]
|
|
380
|
-
|
|
381
|
-
assert len(first.items) == 2
|
|
382
|
-
first_state_coin_1 = first.items[0]
|
|
383
|
-
assert first_state_coin_1.spent_height is None
|
|
384
|
-
assert first_state_coin_1.created_height is not None
|
|
385
|
-
first_state_coin_2 = first.items[1]
|
|
386
|
-
assert first_state_coin_2.spent_height is None
|
|
387
|
-
assert first_state_coin_2.created_height is not None
|
|
388
|
-
|
|
389
|
-
second = coin_update_messages[1]
|
|
390
|
-
assert second.fork_height == fork_height
|
|
391
|
-
assert len(second.items) == 2
|
|
392
|
-
second_state_coin_1 = second.items[0]
|
|
393
|
-
assert second_state_coin_1.spent_height is None
|
|
394
|
-
assert second_state_coin_1.created_height is None
|
|
395
|
-
second_state_coin_2 = second.items[1]
|
|
396
|
-
assert second_state_coin_2.spent_height is None
|
|
397
|
-
assert second_state_coin_2.created_height is None
|
|
398
|
-
|
|
399
|
-
@pytest.mark.anyio
|
|
400
|
-
async def test_subscribe_for_coin_id_reorg(self, simulator_and_wallet, self_hostname):
|
|
401
|
-
num_blocks = 4
|
|
402
|
-
long_blocks = 20
|
|
403
|
-
full_nodes, wallets, _ = simulator_and_wallet
|
|
404
|
-
full_node_api = full_nodes[0]
|
|
405
|
-
wallet_node, server_2 = wallets[0]
|
|
406
|
-
fn_server = full_node_api.full_node.server
|
|
407
|
-
wsm: WalletStateManager = wallet_node.wallet_state_manager
|
|
408
|
-
standard_wallet: Wallet = wsm.wallets[1]
|
|
409
|
-
puzzle_hash = await standard_wallet.get_new_puzzlehash()
|
|
410
|
-
|
|
411
|
-
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
412
|
-
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
413
|
-
|
|
414
|
-
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
415
|
-
zero_ph = 32 * b"\0"
|
|
416
|
-
|
|
417
|
-
# Farm to create a coin that we'll track
|
|
418
|
-
for i in range(0, num_blocks):
|
|
419
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
263
|
+
await full_node_api.process_transaction_records(records=[tx_record])
|
|
420
264
|
|
|
421
|
-
|
|
422
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
265
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
423
266
|
|
|
424
|
-
|
|
267
|
+
notified_coins = set()
|
|
268
|
+
for message in all_messages:
|
|
269
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
270
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
271
|
+
for coin_state in coin_state_update.items:
|
|
272
|
+
notified_coins.add(coin_state.coin)
|
|
273
|
+
assert coin_state.spent_height is not None
|
|
425
274
|
|
|
426
|
-
|
|
427
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
275
|
+
assert notified_coins == coins
|
|
428
276
|
|
|
429
|
-
|
|
430
|
-
|
|
277
|
+
# Test getting notification for coin that is about to be created
|
|
278
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
431
279
|
|
|
432
|
-
|
|
433
|
-
|
|
280
|
+
[tx_record] = await standard_wallet.generate_signed_transaction(
|
|
281
|
+
uint64(10), puzzle_hash, DEFAULT_TX_CONFIG, uint64(0)
|
|
282
|
+
)
|
|
434
283
|
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
|
|
284
|
+
added_target = None
|
|
285
|
+
assert tx_record.spend_bundle is not None
|
|
286
|
+
for coin in tx_record.spend_bundle.additions():
|
|
287
|
+
if coin.puzzle_hash == puzzle_hash:
|
|
288
|
+
added_target = coin
|
|
289
|
+
|
|
290
|
+
assert added_target is not None
|
|
291
|
+
|
|
292
|
+
msg = wallet_protocol.RegisterForCoinUpdates([added_target.name()], uint32(0))
|
|
293
|
+
msg_response = await full_node_api.register_interest_in_coin(msg, fake_wallet_peer)
|
|
294
|
+
assert msg_response is not None
|
|
295
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_coin_update.value
|
|
296
|
+
data_response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
297
|
+
assert len(data_response.coin_states) == 0
|
|
298
|
+
|
|
299
|
+
[tx_record] = await standard_wallet.wallet_state_manager.add_pending_transactions([tx_record])
|
|
300
|
+
|
|
301
|
+
await full_node_api.process_transaction_records(records=[tx_record])
|
|
302
|
+
|
|
303
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
304
|
+
|
|
305
|
+
notified_state = None
|
|
306
|
+
|
|
307
|
+
for message in all_messages:
|
|
308
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
309
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
310
|
+
for coin_state in coin_state_update.items:
|
|
311
|
+
if coin_state.coin.name() == added_target.name():
|
|
312
|
+
notified_state = coin_state
|
|
313
|
+
|
|
314
|
+
assert notified_state is not None
|
|
315
|
+
assert notified_state.coin == added_target
|
|
316
|
+
assert notified_state.spent_height is None
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
@pytest.mark.anyio
|
|
320
|
+
async def test_subscribe_for_ph_reorg(simulator_and_wallet: OldSimulatorsAndWallets, self_hostname: str) -> None:
|
|
321
|
+
num_blocks = 4
|
|
322
|
+
long_blocks = 20
|
|
323
|
+
full_nodes, wallets, _ = simulator_and_wallet
|
|
324
|
+
full_node_api = full_nodes[0]
|
|
325
|
+
wallet_node, server_2 = wallets[0]
|
|
326
|
+
fn_server = full_node_api.full_node.server
|
|
327
|
+
standard_wallet = wallet_node.wallet_state_manager.wallets[uint32(1)]
|
|
328
|
+
assert isinstance(standard_wallet, Wallet)
|
|
329
|
+
puzzle_hash = await standard_wallet.get_new_puzzlehash()
|
|
330
|
+
|
|
331
|
+
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
332
|
+
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
333
|
+
|
|
334
|
+
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
335
|
+
|
|
336
|
+
# Farm to create a coin that we'll track
|
|
337
|
+
for _ in range(num_blocks):
|
|
338
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
339
|
+
|
|
340
|
+
for _ in range(long_blocks):
|
|
341
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
342
|
+
|
|
343
|
+
msg = wallet_protocol.RegisterForPhUpdates([puzzle_hash], uint32(0))
|
|
344
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
345
|
+
assert msg_response is not None
|
|
346
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(puzzle_hash))
|
|
347
|
+
|
|
348
|
+
for _ in range(num_blocks):
|
|
349
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
350
|
+
|
|
351
|
+
expected_height = uint32(long_blocks + 2 * num_blocks + 1)
|
|
352
|
+
await time_out_assert(20, full_node_api.full_node.blockchain.get_peak_height, expected_height)
|
|
353
|
+
|
|
354
|
+
coin_records = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash)
|
|
355
|
+
assert len(coin_records) > 0
|
|
356
|
+
fork_height = uint32(expected_height - num_blocks - 5)
|
|
357
|
+
req = ReorgProtocol(fork_height, uint32(expected_height + 5), zero_ph, None)
|
|
358
|
+
await full_node_api.reorg_from_index_to_new_index(req)
|
|
359
|
+
|
|
360
|
+
coin_records = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash)
|
|
361
|
+
assert coin_records == []
|
|
439
362
|
|
|
440
|
-
|
|
441
|
-
req = ReorgProtocol(fork_height, expected_height + 5, zero_ph, None)
|
|
442
|
-
await full_node_api.reorg_from_index_to_new_index(req)
|
|
363
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
443
364
|
|
|
444
|
-
|
|
445
|
-
|
|
365
|
+
coin_update_messages: List[CoinStateUpdate] = []
|
|
366
|
+
for message in all_messages:
|
|
367
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
368
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
369
|
+
coin_update_messages.append(coin_state_update)
|
|
370
|
+
|
|
371
|
+
# First state is creation, second one is a reorg
|
|
372
|
+
assert len(coin_update_messages) == 2
|
|
373
|
+
first = coin_update_messages[0]
|
|
446
374
|
|
|
447
|
-
|
|
375
|
+
assert len(first.items) == 2
|
|
376
|
+
first_state_coin_1 = first.items[0]
|
|
377
|
+
assert first_state_coin_1.spent_height is None
|
|
378
|
+
assert first_state_coin_1.created_height is not None
|
|
379
|
+
first_state_coin_2 = first.items[1]
|
|
380
|
+
assert first_state_coin_2.spent_height is None
|
|
381
|
+
assert first_state_coin_2.created_height is not None
|
|
382
|
+
|
|
383
|
+
second = coin_update_messages[1]
|
|
384
|
+
assert second.fork_height == fork_height
|
|
385
|
+
assert len(second.items) == 2
|
|
386
|
+
second_state_coin_1 = second.items[0]
|
|
387
|
+
assert second_state_coin_1.spent_height is None
|
|
388
|
+
assert second_state_coin_1.created_height is None
|
|
389
|
+
second_state_coin_2 = second.items[1]
|
|
390
|
+
assert second_state_coin_2.spent_height is None
|
|
391
|
+
assert second_state_coin_2.created_height is None
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
@pytest.mark.anyio
|
|
395
|
+
async def test_subscribe_for_coin_id_reorg(simulator_and_wallet: OldSimulatorsAndWallets, self_hostname: str) -> None:
|
|
396
|
+
num_blocks = 4
|
|
397
|
+
long_blocks = 20
|
|
398
|
+
full_nodes, wallets, _ = simulator_and_wallet
|
|
399
|
+
full_node_api = full_nodes[0]
|
|
400
|
+
wallet_node, server_2 = wallets[0]
|
|
401
|
+
fn_server = full_node_api.full_node.server
|
|
402
|
+
standard_wallet = wallet_node.wallet_state_manager.wallets[uint32(1)]
|
|
403
|
+
assert isinstance(standard_wallet, Wallet)
|
|
404
|
+
puzzle_hash = await standard_wallet.get_new_puzzlehash()
|
|
405
|
+
|
|
406
|
+
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
407
|
+
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
408
|
+
|
|
409
|
+
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
410
|
+
|
|
411
|
+
# Farm to create a coin that we'll track
|
|
412
|
+
for _ in range(num_blocks):
|
|
413
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
414
|
+
|
|
415
|
+
for _ in range(long_blocks):
|
|
416
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
417
|
+
|
|
418
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(puzzle_hash))
|
|
419
|
+
|
|
420
|
+
for _ in range(num_blocks):
|
|
421
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(zero_ph))
|
|
422
|
+
|
|
423
|
+
expected_height = uint32(long_blocks + 2 * num_blocks + 1)
|
|
424
|
+
await time_out_assert(20, full_node_api.full_node.blockchain.get_peak_height, expected_height)
|
|
425
|
+
|
|
426
|
+
coin_records = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash)
|
|
427
|
+
assert len(coin_records) > 0
|
|
448
428
|
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
coin_update_messages.append(data_response)
|
|
454
|
-
|
|
455
|
-
assert len(coin_update_messages) == 1
|
|
456
|
-
update = coin_update_messages[0]
|
|
457
|
-
coin_states = update.items
|
|
458
|
-
assert len(coin_states) == 2
|
|
459
|
-
first_coin = coin_states[0]
|
|
460
|
-
assert first_coin.spent_height is None
|
|
461
|
-
assert first_coin.created_height is None
|
|
462
|
-
second_coin = coin_states[1]
|
|
463
|
-
assert second_coin.spent_height is None
|
|
464
|
-
assert second_coin.created_height is None
|
|
465
|
-
|
|
466
|
-
@pytest.mark.anyio
|
|
467
|
-
async def test_subscribe_for_hint(self, simulator_and_wallet, self_hostname):
|
|
468
|
-
num_blocks = 4
|
|
469
|
-
full_nodes, wallets, bt = simulator_and_wallet
|
|
470
|
-
full_node_api = full_nodes[0]
|
|
471
|
-
wallet_node, server_2 = wallets[0]
|
|
472
|
-
fn_server = full_node_api.full_node.server
|
|
473
|
-
wsm: WalletStateManager = wallet_node.wallet_state_manager
|
|
474
|
-
|
|
475
|
-
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
476
|
-
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
477
|
-
|
|
478
|
-
wt: WalletTool = bt.get_pool_wallet_tool()
|
|
479
|
-
ph = wt.get_new_puzzlehash()
|
|
480
|
-
for i in range(0, num_blocks):
|
|
481
|
-
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
482
|
-
|
|
483
|
-
await asyncio.sleep(6)
|
|
484
|
-
coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hashes(False, [ph])
|
|
485
|
-
coin_spent = coins[0].coin
|
|
486
|
-
hint_puzzle_hash = 32 * b"\2"
|
|
487
|
-
amount = 1
|
|
488
|
-
amount_bin = int_to_bytes(1)
|
|
489
|
-
hint = 32 * b"\5"
|
|
490
|
-
|
|
491
|
-
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
492
|
-
msg = wallet_protocol.RegisterForPhUpdates([hint], 0)
|
|
493
|
-
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
494
|
-
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
495
|
-
data_response: RespondToPhUpdates = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
496
|
-
assert len(data_response.coin_states) == 0
|
|
497
|
-
|
|
498
|
-
condition_dict = {
|
|
499
|
-
ConditionOpcode.CREATE_COIN: [
|
|
500
|
-
ConditionWithArgs(ConditionOpcode.CREATE_COIN, [hint_puzzle_hash, amount_bin, hint])
|
|
501
|
-
]
|
|
502
|
-
}
|
|
503
|
-
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
504
|
-
|
|
505
|
-
tx: SpendBundle = wt.generate_signed_transaction(
|
|
506
|
-
10,
|
|
507
|
-
wt.get_new_puzzlehash(),
|
|
508
|
-
coin_spent,
|
|
509
|
-
condition_dic=condition_dict,
|
|
510
|
-
)
|
|
511
|
-
await full_node_api.respond_transaction(RespondTransaction(tx), fake_wallet_peer)
|
|
512
|
-
|
|
513
|
-
await full_node_api.process_spend_bundles(bundles=[tx])
|
|
514
|
-
|
|
515
|
-
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
429
|
+
for coin_rec in coin_records:
|
|
430
|
+
msg = wallet_protocol.RegisterForCoinUpdates([coin_rec.name], uint32(0))
|
|
431
|
+
msg_response = await full_node_api.register_interest_in_coin(msg, fake_wallet_peer)
|
|
432
|
+
assert msg_response is not None
|
|
516
433
|
|
|
434
|
+
fork_height = uint32(expected_height - num_blocks - 5)
|
|
435
|
+
req = ReorgProtocol(fork_height, uint32(expected_height + 5), zero_ph, None)
|
|
436
|
+
await full_node_api.reorg_from_index_to_new_index(req)
|
|
437
|
+
|
|
438
|
+
coin_records = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash)
|
|
439
|
+
assert coin_records == []
|
|
440
|
+
|
|
441
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
442
|
+
|
|
443
|
+
coin_update_messages: List[CoinStateUpdate] = []
|
|
444
|
+
for message in all_messages:
|
|
445
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
446
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
447
|
+
coin_update_messages.append(coin_state_update)
|
|
448
|
+
|
|
449
|
+
assert len(coin_update_messages) == 1
|
|
450
|
+
update = coin_update_messages[0]
|
|
451
|
+
coin_states = update.items
|
|
452
|
+
assert len(coin_states) == 2
|
|
453
|
+
first_coin = coin_states[0]
|
|
454
|
+
assert first_coin.spent_height is None
|
|
455
|
+
assert first_coin.created_height is None
|
|
456
|
+
second_coin = coin_states[1]
|
|
457
|
+
assert second_coin.spent_height is None
|
|
458
|
+
assert second_coin.created_height is None
|
|
459
|
+
|
|
460
|
+
|
|
461
|
+
@pytest.mark.anyio
|
|
462
|
+
async def test_subscribe_for_hint(simulator_and_wallet: OldSimulatorsAndWallets, self_hostname: str) -> None:
|
|
463
|
+
num_blocks = 4
|
|
464
|
+
full_nodes, wallets, bt = simulator_and_wallet
|
|
465
|
+
full_node_api = full_nodes[0]
|
|
466
|
+
wallet_node, server_2 = wallets[0]
|
|
467
|
+
fn_server = full_node_api.full_node.server
|
|
468
|
+
|
|
469
|
+
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
470
|
+
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
471
|
+
|
|
472
|
+
wt = bt.get_pool_wallet_tool()
|
|
473
|
+
ph = wt.get_new_puzzlehash()
|
|
474
|
+
for _ in range(num_blocks):
|
|
475
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
476
|
+
|
|
477
|
+
await asyncio.sleep(6)
|
|
478
|
+
coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hashes(False, [ph])
|
|
479
|
+
coin_spent = coins[0].coin
|
|
480
|
+
hint_puzzle_hash = 32 * b"\2"
|
|
517
|
-
amount = 1
|
|
481
|
++
amount = uint64(1)
|
|
482
|
+
amount_bin = int_to_bytes(1)
|
|
483
|
+
hint = bytes32(32 * b"\5")
|
|
484
|
+
|
|
485
|
+
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
486
|
+
msg = wallet_protocol.RegisterForPhUpdates([hint], uint32(0))
|
|
487
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
488
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
489
|
+
data_response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
490
|
+
assert len(data_response.coin_states) == 0
|
|
491
|
+
|
|
492
|
+
condition_dict = {
|
|
493
|
+
ConditionOpcode.CREATE_COIN: [
|
|
494
|
+
ConditionWithArgs(ConditionOpcode.CREATE_COIN, [hint_puzzle_hash, amount_bin, hint])
|
|
495
|
+
]
|
|
496
|
+
}
|
|
497
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
498
|
+
|
|
499
|
+
tx = wt.generate_signed_transaction(uint64(10), wt.get_new_puzzlehash(), coin_spent, condition_dic=condition_dict)
|
|
500
|
+
await full_node_api.respond_transaction(RespondTransaction(tx), fake_wallet_peer)
|
|
501
|
+
|
|
502
|
+
await full_node_api.process_spend_bundles(bundles=[tx])
|
|
503
|
+
|
|
504
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
505
|
+
|
|
506
|
+
notified_state = None
|
|
507
|
+
|
|
508
|
+
for message in all_messages:
|
|
509
|
+
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
510
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
511
|
+
notified_state = coin_state_update
|
|
512
|
+
break
|
|
513
|
+
|
|
514
|
+
assert notified_state is not None
|
|
515
|
+
assert notified_state.items[0].coin == Coin(coin_spent.name(), hint_puzzle_hash, amount)
|
|
516
|
+
|
|
517
|
+
msg = wallet_protocol.RegisterForPhUpdates([hint], uint32(0))
|
|
518
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
519
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
520
|
+
response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
521
|
+
# we have already subscribed to this puzzle hash. The full node will
|
|
522
|
+
# ignore the duplicate
|
|
523
|
+
assert response.coin_states == []
|
|
524
|
+
|
|
525
|
+
|
|
526
|
+
@pytest.mark.anyio
|
|
527
|
+
async def test_subscribe_for_puzzle_hash_coin_hint_duplicates(
|
|
528
|
+
simulator_and_wallet: OldSimulatorsAndWallets, self_hostname: str
|
|
529
|
+
) -> None:
|
|
530
|
+
[full_node_api], [[_, wallet_server]], bt = simulator_and_wallet
|
|
531
|
+
full_node_server = full_node_api.full_node.server
|
|
532
|
+
|
|
533
|
+
await wallet_server.start_client(PeerInfo(self_hostname, full_node_server.get_port()), None)
|
|
534
|
+
|
|
535
|
+
wt = bt.get_pool_wallet_tool()
|
|
536
|
+
ph = wt.get_new_puzzlehash()
|
|
537
|
+
await full_node_api.farm_blocks_to_puzzlehash(4, ph)
|
|
538
|
+
coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hashes(False, [ph])
|
|
539
|
+
wallet_connection = full_node_server.all_connections[wallet_server.node_id]
|
|
540
|
+
|
|
541
|
+
# Create a coin which is hinted with its own destination puzzle hash
|
|
542
|
+
tx = wt.generate_signed_transaction(
|
|
543
|
+
uint64(10),
|
|
544
|
+
wt.get_new_puzzlehash(),
|
|
545
|
+
coins[0].coin,
|
|
546
|
+
condition_dic={
|
|
547
|
+
ConditionOpcode.CREATE_COIN: [ConditionWithArgs(ConditionOpcode.CREATE_COIN, [ph, int_to_bytes(1), ph])]
|
|
548
|
+
},
|
|
549
|
+
)
|
|
550
|
+
await full_node_api.respond_transaction(RespondTransaction(tx), wallet_connection)
|
|
551
|
+
await full_node_api.process_spend_bundles(bundles=[tx])
|
|
552
|
+
# Query the coin states and make sure it doesn't contain duplicated entries
|
|
553
|
+
msg = wallet_protocol.RegisterForPhUpdates([ph], uint32(0))
|
|
554
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, wallet_connection)
|
|
555
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
556
|
+
response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
557
|
+
assert len(response.coin_states) > 0
|
|
558
|
+
assert len(set(response.coin_states)) == len(response.coin_states)
|
|
559
|
+
|
|
560
|
+
|
|
561
|
+
@pytest.mark.anyio
|
|
562
|
+
async def test_subscribe_for_hint_long_sync(
|
|
563
|
+
wallet_two_node_simulator: OldSimulatorsAndWallets, self_hostname: str
|
|
564
|
+
) -> None:
|
|
565
|
+
full_nodes, wallets, bt = wallet_two_node_simulator
|
|
566
|
+
full_node_api = full_nodes[0]
|
|
567
|
+
full_node_api_1 = full_nodes[1]
|
|
568
|
+
wallet_node, server_2 = wallets[0]
|
|
569
|
+
fn_server = full_node_api.full_node.server
|
|
570
|
+
fn_server_1 = full_node_api_1.full_node.server
|
|
571
|
+
|
|
572
|
+
await server_2.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
573
|
+
incoming_queue, peer_id = await add_dummy_connection(fn_server, self_hostname, 12312, NodeType.WALLET)
|
|
574
|
+
incoming_queue_1, peer_id_1 = await add_dummy_connection(fn_server_1, self_hostname, 12313, NodeType.WALLET)
|
|
575
|
+
|
|
576
|
+
wt = bt.get_pool_wallet_tool()
|
|
577
|
+
ph = wt.get_new_puzzlehash()
|
|
578
|
+
for _ in range(2):
|
|
579
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
580
|
+
|
|
581
|
+
coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hashes(False, [ph])
|
|
582
|
+
coin_spent = coins[0].coin
|
|
583
|
+
hint_puzzle_hash = 32 * b"\2"
|
|
518
|
-
amount = 1
|
|
584
|
++
amount = uint64(1)
|
|
585
|
+
amount_bin = int_to_bytes(1)
|
|
586
|
+
hint = bytes32(32 * b"\5")
|
|
587
|
+
|
|
588
|
+
fake_wallet_peer = fn_server.all_connections[peer_id]
|
|
589
|
+
fake_wallet_peer_1 = fn_server_1.all_connections[peer_id_1]
|
|
590
|
+
msg = wallet_protocol.RegisterForPhUpdates([hint], uint32(0))
|
|
591
|
+
msg_response = await full_node_api.register_interest_in_puzzle_hash(msg, fake_wallet_peer)
|
|
592
|
+
await full_node_api_1.register_interest_in_puzzle_hash(msg, fake_wallet_peer_1)
|
|
593
|
+
|
|
594
|
+
assert msg_response.type == ProtocolMessageTypes.respond_to_ph_update.value
|
|
595
|
+
data_response = RespondToCoinUpdates.from_bytes(msg_response.data)
|
|
596
|
+
assert len(data_response.coin_states) == 0
|
|
597
|
+
|
|
598
|
+
condition_dict = {
|
|
599
|
+
ConditionOpcode.CREATE_COIN: [
|
|
600
|
+
ConditionWithArgs(ConditionOpcode.CREATE_COIN, [hint_puzzle_hash, amount_bin, hint])
|
|
601
|
+
]
|
|
602
|
+
}
|
|
603
|
+
await full_node_api.wait_for_wallet_synced(wallet_node=wallet_node, timeout=20)
|
|
604
|
+
|
|
605
|
+
tx = wt.generate_signed_transaction(uint64(10), wt.get_new_puzzlehash(), coin_spent, condition_dic=condition_dict)
|
|
606
|
+
await full_node_api.respond_transaction(RespondTransaction(tx), fake_wallet_peer)
|
|
607
|
+
|
|
608
|
+
await full_node_api.process_spend_bundles(bundles=[tx])
|
|
609
|
+
|
|
610
|
+
# Create more blocks than recent "short_sync_blocks_behind_threshold" so that node enters batch
|
|
611
|
+
blocks_to_farm = full_node_api.full_node.config.get("short_sync_blocks_behind_threshold", 100)
|
|
612
|
+
for _ in range(blocks_to_farm):
|
|
613
|
+
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
|
|
614
|
+
|
|
615
|
+
node1_height = full_node_api_1.full_node.blockchain.get_peak_height()
|
|
616
|
+
assert node1_height is None
|
|
617
|
+
|
|
618
|
+
await fn_server_1.start_client(PeerInfo(self_hostname, fn_server.get_port()), None)
|
|
619
|
+
node0_height = full_node_api.full_node.blockchain.get_peak_height()
|
|
620
|
+
await time_out_assert(60, full_node_api_1.full_node.blockchain.get_peak_height, node0_height)
|
|
621
|
+
|
|
622
|
+
all_messages = await get_all_messages_in_queue(incoming_queue)
|
|
623
|
+
all_messages_1 = await get_all_messages_in_queue(incoming_queue_1)
|
|
624
|
+
|
|
625
|
+
def check_messages_for_hint(messages: List[Message]) -> None:
|
|
519
626
|
notified_state = None
|
|
520
627
|
|
|
521
|
-
for message in
|
|
628
|
+
for message in messages:
|
|
522
629
|
if message.type == ProtocolMessageTypes.coin_state_update.value:
|
|
523
|
-
|
|
524
|
-
notified_state =
|
|
630
|
+
coin_state_update = CoinStateUpdate.from_bytes(message.data)
|
|
631
|
+
notified_state = coin_state_update
|
|
525
632
|
break
|
|
526
633
|
|
|
527
634
|
assert notified_state is not None
|
|
@@@ -12,6 -12,6 +12,7 @@@ from chia.types.coin_spend import make_
|
|
|
12
12
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
13
13
|
from chia.types.spend_bundle import SpendBundle
|
|
14
14
|
from chia.util.hash import std_hash
|
|
15
|
++
from chia.util.ints import uint64
|
|
15
16
|
from chia.wallet.util.debug_spend_bundle import debug_spend_bundle
|
|
16
17
|
|
|
17
18
|
|
|
@@@ -22,9 -22,9 +23,9 @@@ def test_debug_spend_bundle() -> None
|
|
|
22
23
|
sig = AugSchemeMPL.sign(sk, msg)
|
|
23
24
|
ACS = Program.to(15).curry(Program.to("hey").curry("now")).curry("brown", "cow")
|
|
24
25
|
ACS_PH = ACS.get_tree_hash()
|
|
25
|
--
coin: Coin = Coin(bytes32([0] * 32), ACS_PH, 3)
|
|
26
|
--
child_coin: Coin = Coin(coin.name(), ACS_PH, 0)
|
|
27
|
--
coin_bad_reveal: Coin = Coin(bytes32([0] * 32), bytes32([0] * 32), 0)
|
|
26
|
++
coin: Coin = Coin(bytes32([0] * 32), ACS_PH, uint64(3))
|
|
27
|
++
child_coin: Coin = Coin(coin.name(), ACS_PH, uint64(0))
|
|
28
|
++
coin_bad_reveal: Coin = Coin(bytes32([0] * 32), bytes32([0] * 32), uint64(0))
|
|
28
29
|
solution = Program.to(
|
|
29
30
|
[
|
|
30
31
|
[ConditionOpcode.AGG_SIG_UNSAFE, pk, msg],
|
|
@@@ -10,10 -10,10 +10,10 @@@ from chia.types.blockchain_format.coin
|
|
|
10
10
|
from chia.types.blockchain_format.program import Program
|
|
11
11
|
from chia.types.blockchain_format.serialized_program import SerializedProgram
|
|
12
12
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
13
|
-
from chia.types.coin_spend import CoinSpend
|
|
13
|
+
from chia.types.coin_spend import CoinSpend, make_spend
|
|
14
14
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
15
15
|
from chia.util.db_wrapper import DBWrapper2, manage_connection
|
|
16
|
--
from chia.util.ints import uint32
|
|
16
|
++
from chia.util.ints import uint32, uint64
|
|
17
17
|
from chia.wallet.derivation_record import DerivationRecord
|
|
18
18
|
from chia.wallet.derive_keys import master_sk_to_wallet_sk, master_sk_to_wallet_sk_unhardened
|
|
19
19
|
from chia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
|
|
@@@ -45,15 -44,15 +45,15 @@@ msg2: bytes = b"msg2
|
|
|
45
45
|
|
|
46
46
|
additional_data: bytes32 = bytes32(DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA)
|
|
47
47
|
|
|
48
|
--
coin: Coin = Coin(bytes32([0] * 32), bytes32([0] * 32), 0)
|
|
48
|
++
coin: Coin = Coin(bytes32([0] * 32), bytes32([0] * 32), uint64(0))
|
|
49
49
|
puzzle = SerializedProgram.from_bytes(b"\x01")
|
|
50
50
|
solution_h = SerializedProgram.from_program(
|
|
51
|
-
Program.to([[ConditionOpcode.AGG_SIG_UNSAFE, pk1_h, msg1], [ConditionOpcode.AGG_SIG_ME,
|
|
51
|
+
Program.to([[ConditionOpcode.AGG_SIG_UNSAFE, pk1_h, msg1], [ConditionOpcode.AGG_SIG_ME, pk2_h_synth, msg2]])
|
|
52
52
|
)
|
|
53
53
|
solution_u = SerializedProgram.from_program(
|
|
54
|
-
Program.to([[ConditionOpcode.AGG_SIG_UNSAFE, pk1_u, msg1], [ConditionOpcode.AGG_SIG_ME,
|
|
54
|
+
Program.to([[ConditionOpcode.AGG_SIG_UNSAFE, pk1_u, msg1], [ConditionOpcode.AGG_SIG_ME, pk2_u_synth, msg2]])
|
|
55
55
|
)
|
|
56
|
-
spend_h: CoinSpend =
|
|
56
|
+
spend_h: CoinSpend = make_spend(
|
|
57
57
|
coin,
|
|
58
58
|
puzzle,
|
|
59
59
|
solution_h,
|
|
@@@ -74,7 -73,7 +74,7 @@@ def test_only_one_odd_coin_created() -
|
|
|
74
74
|
solution = Program.to(
|
|
75
75
|
[
|
|
76
76
|
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
|
|
77
|
-
Program.to(binutils.assemble(clsp)),
|
|
78
|
-
Program.to(binutils.assemble("(q (51 0xcafef00d 203) (51 0xfadeddab 205))")),
|
|
77
|
++
Program.to(binutils.assemble(clsp)),
|
|
79
78
|
[0xDEADBEEF, 0xCAFEF00D, 411],
|
|
80
79
|
411,
|
|
81
80
|
[],
|
|
@@@ -88,7 -87,7 +88,7 @@@
|
|
|
88
88
|
solution = Program.to(
|
|
89
89
|
[
|
|
90
90
|
(singleton_mod_hash, (LAUNCHER_ID, LAUNCHER_PUZZLE_HASH)),
|
|
91
|
-
Program.to(binutils.assemble(clsp)),
|
|
92
|
-
Program.to(binutils.assemble("(q (51 0xcafef00d 203) (51 0xfadeddab 204) (51 0xdeadbeef 202))")),
|
|
91
|
++
Program.to(binutils.assemble(clsp)),
|
|
93
92
|
[0xDEADBEEF, 0xCAFEF00D, 411],
|
|
94
93
|
411,
|
|
95
94
|
[],
|
|
@@@ -1,7 -1,7 +1,7 @@@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
3
|
from dataclasses import dataclass
|
|
4
|
-
from typing import Any, Callable, Dict, List, Optional, Tuple, cast
|
|
5
|
-
from typing import Any, Callable, Dict, List, Optional, Tuple
|
|
4
|
++
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, cast
|
|
6
5
|
|
|
7
6
|
from chia_rs import G1Element, G2Element
|
|
8
7
|
from clvm_tools import binutils
|
|
@@@ -15,6 -15,6 +15,7 @@@ from chia.types.coin_spend import CoinS
|
|
|
15
15
|
from chia.types.condition_opcodes import ConditionOpcode
|
|
16
16
|
from chia.types.spend_bundle import SpendBundle
|
|
17
17
|
from chia.util.ints import uint32, uint64
|
|
18
|
++
from chia.util.misc import satisfies_hint
|
|
18
19
|
from chia.wallet.conditions import AssertCoinAnnouncement
|
|
19
20
|
from chia.wallet.puzzles.load_clvm import load_clvm
|
|
20
21
|
from chia.wallet.util.debug_spend_bundle import debug_spend_bundle
|
|
@@@ -49,12 -49,12 +50,16 @@@ class PuzzleDB
|
|
|
49
50
|
return self._db.get(puzzle_hash)
|
|
50
51
|
|
|
51
52
|
|
|
52
|
-
|
|
53
|
-
|
|
53
|
++
T = TypeVar("T")
|
|
54
|
++
|
|
55
|
++
|
|
56
|
++
def from_kwargs(kwargs: Dict[str, Any], key: str, type_info: Type[T]) -> T:
|
|
54
57
|
"""Raise an exception if `kwargs[key]` is missing or the wrong type"""
|
|
55
|
--
"""for now, we just check that it's present"""
|
|
56
58
|
if key not in kwargs:
|
|
57
59
|
raise ValueError(f"`{key}` missing in call to `solve`")
|
|
58
|
--
|
|
60
|
++
if not satisfies_hint(kwargs[key], type_info):
|
|
61
|
++
raise TypeError(f"`{key}` must be of type {type_info} but is of type {type(kwargs[key])}")
|
|
62
|
++
return cast(T, kwargs[key])
|
|
59
63
|
|
|
60
64
|
|
|
61
65
|
Solver_F = Callable[["Solver", PuzzleDB, List[Program], Any], Program]
|
|
@@@ -92,16 -92,15 +97,16 @@@ class Solver
|
|
|
92
97
|
raise ValueError("can't solve")
|
|
93
98
|
|
|
94
99
|
|
|
95
|
-
def solve_launcher(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict) -> Program:
|
|
96
|
-
launcher_amount = from_kwargs(kwargs, "launcher_amount", int)
|
|
100
|
+
def solve_launcher(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict[str, Any]) -> Program:
|
|
97
|
-
launcher_amount = from_kwargs(kwargs, "launcher_amount",
|
|
101
|
++
launcher_amount = from_kwargs(kwargs, "launcher_amount", uint64)
|
|
98
102
|
destination_puzzle_hash = from_kwargs(kwargs, "destination_puzzle_hash", bytes32)
|
|
99
|
--
metadata = from_kwargs(kwargs, "metadata", List[Tuple[str,
|
|
103
|
++
metadata = from_kwargs(kwargs, "metadata", List[Tuple[str, str]])
|
|
100
104
|
solution = Program.to([destination_puzzle_hash, launcher_amount, metadata])
|
|
101
|
-
|
|
105
|
+
# TODO: Remove cast when we improve typing
|
|
106
|
+
return cast(Program, solution)
|
|
102
107
|
|
|
103
108
|
|
|
104
|
-
def solve_anyone_can_spend(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict) -> Program:
|
|
109
|
+
def solve_anyone_can_spend(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict[str, Any]) -> Program:
|
|
105
110
|
"""
|
|
106
111
|
This is the anyone-can-spend puzzle `1`. Note that farmers can easily steal this coin, so don't use
|
|
107
112
|
it except for testing.
|
|
@@@ -127,66 -124,58 +132,66 @@@ def solve_singleton(solver: Solver, puz
|
|
|
127
132
|
`lineage_proof`: a `Program` that proves the parent is also a singleton (or the launcher).
|
|
128
133
|
`coin_amount`: a necessarily-odd value of mojos in this coin.
|
|
129
134
|
"""
|
|
130
|
-
|
|
135
|
+
_, inner_puzzle = args
|
|
131
136
|
inner_solution = solver.solve(puzzle_db, inner_puzzle, **kwargs)
|
|
132
137
|
lineage_proof = from_kwargs(kwargs, "lineage_proof", Program)
|
|
133
|
--
coin_amount = from_kwargs(kwargs, "coin_amount",
|
|
138
|
++
coin_amount = from_kwargs(kwargs, "coin_amount", uint64)
|
|
134
139
|
solution = inner_solution.to([lineage_proof, coin_amount, inner_solution.rest()])
|
|
135
|
-
|
|
140
|
+
# TODO: Remove cast when we improve typing
|
|
141
|
+
return cast(Program, solution)
|
|
136
142
|
|
|
137
143
|
|
|
138
|
-
def solve_pool_member(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict) -> Program:
|
|
139
|
-
pool_member_spend_type = from_kwargs(kwargs, "pool_member_spend_type")
|
|
144
|
+
def solve_pool_member(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict[str, Any]) -> Program:
|
|
140
|
-
pool_member_spend_type = from_kwargs(kwargs, "pool_member_spend_type")
|
|
145
|
++
pool_member_spend_type = from_kwargs(kwargs, "pool_member_spend_type", str)
|
|
141
146
|
allowable = ["to-waiting-room", "claim-p2-nft"]
|
|
142
147
|
if pool_member_spend_type not in allowable:
|
|
143
148
|
raise ValueError("`pool_member_spend_type` must be one of %s for POOL_MEMBER puzzle" % "/".join(allowable))
|
|
144
149
|
to_waiting_room = pool_member_spend_type == "to-waiting-room"
|
|
145
150
|
if to_waiting_room:
|
|
146
|
--
key_value_list = from_kwargs(kwargs, "key_value_list",
|
|
147
|
-
|
|
151
|
++
key_value_list = from_kwargs(kwargs, "key_value_list", Program)
|
|
152
|
+
# TODO: Remove cast when we improve typing
|
|
153
|
+
return cast(Program, Program.to([0, 1, 0, 0, key_value_list]))
|
|
148
154
|
# it's an "absorb_pool_reward" type
|
|
149
|
--
pool_reward_amount = from_kwargs(kwargs, "pool_reward_amount",
|
|
155
|
++
pool_reward_amount = from_kwargs(kwargs, "pool_reward_amount", uint64)
|
|
150
156
|
pool_reward_height = from_kwargs(kwargs, "pool_reward_height", int)
|
|
151
157
|
solution = Program.to([0, pool_reward_amount, pool_reward_height])
|
|
152
|
-
|
|
158
|
+
# TODO: Remove cast when we improve typing
|
|
159
|
+
return cast(Program, solution)
|
|
153
160
|
|
|
154
161
|
|
|
155
|
-
def solve_pool_waiting_room(
|
|
156
|
-
|
|
162
|
+
def solve_pool_waiting_room(
|
|
163
|
+
solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict[str, Any]
|
|
164
|
+
) -> Program:
|
|
157
|
-
pool_leaving_spend_type = from_kwargs(kwargs, "pool_leaving_spend_type")
|
|
165
|
++
pool_leaving_spend_type = from_kwargs(kwargs, "pool_leaving_spend_type", str)
|
|
158
166
|
allowable = ["exit-waiting-room", "claim-p2-nft"]
|
|
159
167
|
if pool_leaving_spend_type not in allowable:
|
|
160
168
|
raise ValueError("`pool_leaving_spend_type` must be one of %s for POOL_MEMBER puzzle" % "/".join(allowable))
|
|
161
169
|
exit_waiting_room = pool_leaving_spend_type == "exit-waiting-room"
|
|
162
170
|
if exit_waiting_room:
|
|
163
|
--
key_value_list = from_kwargs(kwargs, "key_value_list", List[Tuple[str,
|
|
164
|
--
destination_puzzle_hash = from_kwargs(kwargs, "destination_puzzle_hash",
|
|
165
|
-
|
|
171
|
++
key_value_list = from_kwargs(kwargs, "key_value_list", List[Tuple[str, str]])
|
|
172
|
++
destination_puzzle_hash = from_kwargs(kwargs, "destination_puzzle_hash", bytes32)
|
|
173
|
+
# TODO: Remove cast when we improve typing
|
|
174
|
+
return cast(Program, Program.to([0, 1, key_value_list, destination_puzzle_hash]))
|
|
166
175
|
# it's an "absorb_pool_reward" type
|
|
167
|
--
pool_reward_amount = from_kwargs(kwargs, "pool_reward_amount",
|
|
176
|
++
pool_reward_amount = from_kwargs(kwargs, "pool_reward_amount", uint64)
|
|
168
177
|
pool_reward_height = from_kwargs(kwargs, "pool_reward_height", int)
|
|
169
178
|
solution = Program.to([0, 0, pool_reward_amount, pool_reward_height])
|
|
170
|
-
|
|
179
|
+
# TODO: Remove cast when we improve typing
|
|
180
|
+
return cast(Program, solution)
|
|
171
181
|
|
|
172
182
|
|
|
173
|
-
def solve_p2_singleton(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict) -> Program:
|
|
174
|
-
p2_singleton_spend_type = from_kwargs(kwargs, "p2_singleton_spend_type")
|
|
183
|
+
def solve_p2_singleton(solver: Solver, puzzle_db: PuzzleDB, args: List[Program], kwargs: Dict[str, Any]) -> Program:
|
|
175
|
-
p2_singleton_spend_type = from_kwargs(kwargs, "p2_singleton_spend_type")
|
|
184
|
++
p2_singleton_spend_type = from_kwargs(kwargs, "p2_singleton_spend_type", str)
|
|
176
185
|
allowable = ["claim-p2-nft", "delayed-spend"]
|
|
177
186
|
if p2_singleton_spend_type not in allowable:
|
|
178
187
|
raise ValueError("`p2_singleton_spend_type` must be one of %s for P2_SINGLETON puzzle" % "/".join(allowable))
|
|
179
188
|
claim_p2_nft = p2_singleton_spend_type == "claim-p2-nft"
|
|
180
189
|
if claim_p2_nft:
|
|
181
|
--
singleton_inner_puzzle_hash = from_kwargs(kwargs, "singleton_inner_puzzle_hash")
|
|
182
|
--
p2_singleton_coin_name = from_kwargs(kwargs, "p2_singleton_coin_name")
|
|
190
|
++
singleton_inner_puzzle_hash = from_kwargs(kwargs, "singleton_inner_puzzle_hash", bytes32)
|
|
191
|
++
p2_singleton_coin_name = from_kwargs(kwargs, "p2_singleton_coin_name", bytes)
|
|
183
192
|
solution = Program.to([singleton_inner_puzzle_hash, p2_singleton_coin_name])
|
|
184
|
-
|
|
193
|
+
# TODO: Remove cast when we improve typing
|
|
194
|
+
return cast(Program, solution)
|
|
185
195
|
raise ValueError("can't solve `delayed-spend` yet")
|
|
186
196
|
|
|
187
197
|
|
|
@@@ -268,9 -258,7 +273,9 @@@ def adaptor_for_singleton_inner_puzzle(
|
|
|
268
273
|
puzzle to work as a singleton inner puzzle.
|
|
269
274
|
"""
|
|
270
275
|
# this is pretty slow and lame
|
|
271
|
-
program = binutils.assemble("(a (q . %s) 3)" % binutils.disassemble(puzzle))
|
|
272
|
-
|
|
276
|
++
program = binutils.assemble("(a (q . %s) 3)" % binutils.disassemble(puzzle))
|
|
277
|
+
# TODO: Remove cast when we improve typing
|
|
278
|
+
return cast(Program, Program.to(program))
|
|
273
279
|
|
|
274
280
|
|
|
275
281
|
def launcher_conditions_and_spend_bundle(
|
|
@@@ -290,13 -277,21 +295,13 @@@
|
|
|
290
295
|
singleton_full_puzzle_hash = singleton_full_puzzle.get_tree_hash()
|
|
291
296
|
message_program = Program.to([singleton_full_puzzle_hash, launcher_amount, metadata])
|
|
292
297
|
expected_announcement = AssertCoinAnnouncement(
|
|
293
|
-
asserted_id=
|
|
298
|
+
asserted_id=launcher_id, asserted_msg=message_program.get_tree_hash()
|
|
294
299
|
)
|
|
295
300
|
expected_conditions = []
|
|
296
|
-
|
|
297
|
-
Program.to(
|
|
298
|
-
binutils.assemble(
|
|
299
|
-
f"(0x{ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT.hex()} 0x{expected_announcement.msg_calc})"
|
|
300
|
-
)
|
|
301
|
-
)
|
|
302
|
-
)
|
|
303
|
-
expected_conditions.append(
|
|
304
|
-
Program.to(
|
|
305
|
-
binutils.assemble(f"(0x{ConditionOpcode.CREATE_COIN.hex()} 0x{launcher_puzzle_hash} {launcher_amount})")
|
|
306
|
-
)
|
|
307
|
-
)
|
|
301
|
+
clsp = f"(0x{ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT.hex()} 0x{expected_announcement.msg_calc})"
|
|
308
|
-
expected_conditions.append(Program.to(binutils.assemble(clsp)))
|
|
302
|
++
expected_conditions.append(Program.to(binutils.assemble(clsp)))
|
|
303
|
+
clsp = f"(0x{ConditionOpcode.CREATE_COIN.hex()} 0x{launcher_puzzle_hash} {launcher_amount})"
|
|
309
|
-
expected_conditions.append(Program.to(binutils.assemble(clsp)))
|
|
304
|
++
expected_conditions.append(Program.to(binutils.assemble(clsp)))
|
|
310
305
|
solution = solve_puzzle(
|
|
311
306
|
puzzle_db,
|
|
312
307
|
launcher_puzzle,
|
|
@@@ -737,7 -741,7 +742,7 @@@ def test_lifecycle_with_coinstore_as_wa
|
|
|
737
742
|
# now spend to oblivion with the `-113` hack
|
|
738
743
|
|
|
739
744
|
coin_spend = SINGLETON_WALLET.coin_spend_for_conditions(
|
|
740
|
--
PUZZLE_DB, conditions=[[ConditionOpcode.CREATE_COIN, 0, -113]]
|
|
745
|
++
PUZZLE_DB, conditions=[Program.to([ConditionOpcode.CREATE_COIN, 0, -113])]
|
|
741
746
|
)
|
|
742
747
|
spend_bundle = SpendBundle([coin_spend], G2Element())
|
|
743
748
|
debug_spend_bundle(spend_bundle)
|
|
@@@ -25,7 -25,7 +25,7 @@@ def get_record(wallet_id: uint32 = uint
|
|
|
25
25
|
inner_puz_hash = inner_puz.get_tree_hash()
|
|
26
26
|
parent_puz = create_singleton_puzzle(inner_puz, launcher_id)
|
|
27
27
|
parent_puz_hash = parent_puz.get_tree_hash()
|
|
28
|
--
parent_coin = Coin(launcher_id, parent_puz_hash, 1)
|
|
28
|
++
parent_coin = Coin(launcher_id, parent_puz_hash, uint64(1))
|
|
29
29
|
inner_sol = Program.to([[51, inner_puz_hash, 1]])
|
|
30
30
|
lineage_proof = LineageProof(launcher_id, inner_puz.get_tree_hash(), uint64(1))
|
|
31
31
|
parent_sol = Program.to([lineage_proof.to_program(), 1, inner_sol])
|
|
@@@ -71,7 -71,7 +71,7 @@@ class TestSingletonStore
|
|
|
71
71
|
async with DBConnection(1) as wrapper:
|
|
72
72
|
db = await WalletSingletonStore.create(wrapper)
|
|
73
73
|
record = get_record()
|
|
74
|
--
child_coin = Coin(record.coin.name(), record.coin.puzzle_hash, 1)
|
|
74
|
++
child_coin = Coin(record.coin.name(), record.coin.puzzle_hash, uint64(1))
|
|
75
75
|
parent_coinspend = record.parent_coinspend
|
|
76
76
|
|
|
77
77
|
# test add spend
|
|
@@@ -82,9 -82,9 +82,9 @@@
|
|
|
82
82
|
# Test adding a non-singleton will fail
|
|
83
83
|
inner_puz = Program.to(1)
|
|
84
84
|
inner_puz_hash = inner_puz.get_tree_hash()
|
|
85
|
--
bad_coin = Coin(record.singleton_id, inner_puz_hash, 1)
|
|
85
|
++
bad_coin = Coin(record.singleton_id, inner_puz_hash, uint64(1))
|
|
86
86
|
inner_sol = Program.to([[51, inner_puz_hash, 1]])
|
|
87
|
-
bad_coinspend =
|
|
87
|
+
bad_coinspend = make_spend(bad_coin, inner_puz, inner_sol)
|
|
88
88
|
with pytest.raises(RuntimeError) as e_info:
|
|
89
89
|
await db.add_spend(uint32(2), bad_coinspend, uint32(10))
|
|
90
90
|
assert e_info.value.args[0] == "Coin to add is not a valid singleton"
|
|
@@@ -500,7 -500,7 +500,7 @@@ async def test_add_states_from_peer_unt
|
|
|
500
500
|
wallet_node._close()
|
|
501
501
|
coin_generator = CoinGenerator()
|
|
502
502
|
# Generate enough coin states to fill up the max number validation/add tasks.
|
|
503
|
--
coin_states = [CoinState(coin_generator.get().coin, i, i) for i in range(3000)]
|
|
503
|
++
coin_states = [CoinState(coin_generator.get().coin, uint32(i), uint32(i)) for i in range(3000)]
|
|
504
504
|
with caplog.at_level(logging.INFO):
|
|
505
505
|
assert not await wallet_node.add_states_from_peer(coin_states, list(wallet_server.all_connections.values())[0])
|
|
506
506
|
assert "Terminating receipt and validation due to shut down request" in caplog.text
|
|
@@@ -10,7 -10,8 +10,7 @@@ from chia.server.outbound_message impor
|
|
|
10
10
|
from chia.types.blockchain_format.coin import Coin
|
|
11
11
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
12
12
|
from chia.types.peer_info import PeerInfo
|
|
13
|
--
from chia.util.ints import uint32
|
|
13
|
++
from chia.util.ints import uint32, uint64
|
|
14
14
|
from chia.wallet.derivation_record import DerivationRecord
|
|
15
15
|
from chia.wallet.derive_keys import master_sk_to_wallet_sk, master_sk_to_wallet_sk_unhardened
|
|
16
16
|
from chia.wallet.util.wallet_types import WalletType
|
|
@@@ -93,5 -93,5 +93,5 @@@ async def test_determine_coin_type(simu
|
|
|
93
93
|
wallet_state_manager: WalletStateManager = wallet_node.wallet_state_manager
|
|
94
94
|
peer = wallet_node.server.get_connections(NodeType.FULL_NODE)[0]
|
|
95
95
|
assert (None, None) == await wallet_state_manager.determine_coin_type(
|
|
96
|
--
peer, CoinState(Coin(bytes32(b"1" * 32), bytes32(b"1" * 32), 0), uint32(0), uint32(0)), None
|
|
96
|
++
peer, CoinState(Coin(bytes32(b"1" * 32), bytes32(b"1" * 32), uint64(0)), uint32(0), uint32(0)), None
|
|
97
97
|
)
|
|
@@@ -6,19 -6,19 +6,19 @@@ import pytes
|
|
|
6
6
|
from chia_rs import Coin, CoinState
|
|
7
7
|
|
|
8
8
|
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
9
|
--
from chia.util.ints import uint64
|
|
9
|
++
from chia.util.ints import uint32, uint64
|
|
10
10
|
from chia.wallet.util.peer_request_cache import PeerRequestCache
|
|
11
11
|
from chia.wallet.util.wallet_sync_utils import sort_coin_states
|
|
12
12
|
|
|
13
13
|
coin_states = [
|
|
14
14
|
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\00" * 32), uint64(1)), None, None),
|
|
15
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\11" * 32), uint64(1)), None, 1),
|
|
16
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\22" * 32), uint64(1)), 1, 1),
|
|
17
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\33" * 32), uint64(1)), 1, 1),
|
|
18
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\44" * 32), uint64(1)), 2, 1),
|
|
19
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\55" * 32), uint64(1)), 2, 2),
|
|
20
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\66" * 32), uint64(1)), 20, 10),
|
|
21
|
--
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\77" * 32), uint64(1)), None, 20),
|
|
15
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\11" * 32), uint64(1)), None, uint32(1)),
|
|
16
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\22" * 32), uint64(1)), uint32(1), uint32(1)),
|
|
17
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\33" * 32), uint64(1)), uint32(1), uint32(1)),
|
|
18
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\44" * 32), uint64(1)), uint32(2), uint32(1)),
|
|
19
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\55" * 32), uint64(1)), uint32(2), uint32(2)),
|
|
20
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\66" * 32), uint64(1)), uint32(20), uint32(10)),
|
|
21
|
++
CoinState(Coin(bytes32(b"\00" * 32), bytes32(b"\77" * 32), uint64(1)), None, uint32(20)),
|
|
22
22
|
]
|
|
23
23
|
|
|
24
24
|
|
|
@@@ -32,7 -32,7 +32,11 @@@ def assert_race_cache(cache: PeerReques
|
|
|
32
32
|
|
|
33
33
|
|
|
34
34
|
def dummy_coin_state(*, created_height: Optional[int], spent_height: Optional[int]) -> CoinState:
|
|
35
|
--
return CoinState(
|
|
35
|
++
return CoinState(
|
|
36
|
++
Coin(bytes(b"0" * 32), bytes(b"0" * 32), uint64(0)),
|
|
37
|
++
uint32.construct_optional(spent_height),
|
|
38
|
++
uint32.construct_optional(created_height),
|
|
39
|
++
)
|
|
36
40
|
|
|
37
41
|
|
|
38
42
|
def heights(coin_states: Collection[CoinState]) -> List[Tuple[Optional[int], Optional[int]]]:
|